// SPDX-License-Identifier: GPL-2.0
/* iommu.c: Generic sparc64 IOMMU support.
 *
 * Copyright (C) 1999, 2007, 2008 David S. Miller (davem@davemloft.net)
 * Copyright (C) 1999, 2000 Jakub Jelinek (jakub@redhat.com)
 */

#include <linux/kernel.h>
#include <linux/export.h>
#include <linux/slab.h>
#include <linux/delay.h>
#include <linux/device.h>
#include <linux/dma-map-ops.h>
#include <linux/errno.h>
#include <linux/iommu-helper.h>
#include <linux/bitmap.h>
#include <asm/iommu-common.h>

#ifdef CONFIG_PCI
#include <linux/pci.h>
#endif

#include <asm/iommu.h>

#include "iommu_common.h"
#include "kernel.h"

#define STC_CTXMATCH_ADDR(STC, CTX)	\
	((STC)->strbuf_ctxmatch_base + ((CTX) << 3))
#define STC_FLUSHFLAG_INIT(STC) \
	(*((STC)->strbuf_flushflag) = 0UL)
#define STC_FLUSHFLAG_SET(STC) \
	(*((STC)->strbuf_flushflag) != 0UL)

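/* Register accessors: IOMMU and streaming cache control registers are
 * accessed by physical address through the ASI_PHYS_BYPASS_EC_E ASI.
 */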
#define iommu_read(__reg) \
({	u64 __ret; \
	__asm__ __volatile__("ldxa [%1] %2, %0" \
			     : "=r" (__ret) \
			     : "r" (__reg), "i" (ASI_PHYS_BYPASS_EC_E) \
			     : "memory"); \
	__ret; \
})
#define iommu_write(__reg, __val) \
	__asm__ __volatile__("stxa %0, [%1] %2" \
			     : /* no outputs */ \
			     : "r" (__val), "r" (__reg), \
			       "i" (ASI_PHYS_BYPASS_EC_E))

/* Must be invoked under the IOMMU lock. */
static void iommu_flushall(struct iommu_map_table *iommu_map_table)
{
	struct iommu *iommu = container_of(iommu_map_table, struct iommu, tbl);
	if (iommu->iommu_flushinv) {
		iommu_write(iommu->iommu_flushinv, ~(u64)0);
	} else {
		unsigned long tag;
		int entry;

		tag = iommu->iommu_tags;
		for (entry = 0; entry < 16; entry++) {
			iommu_write(tag, 0);
			tag += 8;
		}

		/* Ensure completion of previous PIO writes. */
		(void) iommu_read(iommu->write_complete_reg);
	}
}

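/* IOPTE protection templates.  A consistent (non-streaming) mapping is
 * valid and cacheable, with the DMA context placed in the context field
 * at bit 47; a streaming mapping additionally routes data through the
 * streaming buffer.
 */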
#define IOPTE_CONSISTENT(CTX) \
	(IOPTE_VALID | IOPTE_CACHE | \
	 (((CTX) << 47) & IOPTE_CONTEXT))

#define IOPTE_STREAMING(CTX) \
	(IOPTE_CONSISTENT(CTX) | IOPTE_STBUF)

/* Existing mappings are never marked invalid, instead they
 * are pointed to a dummy page.
 */
#define IOPTE_IS_DUMMY(iommu, iopte)	\
	((iopte_val(*iopte) & IOPTE_PAGE) == (iommu)->dummy_page_pa)

static inline void iopte_make_dummy(struct iommu *iommu, iopte_t *iopte)
{
	unsigned long val = iopte_val(*iopte);

	val &= ~IOPTE_PAGE;
	val |= iommu->dummy_page_pa;

	iopte_val(*iopte) = val;
}

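/* Set up one IOMMU: initialize the software state and the allocation
 * map, allocate the dummy page that inactive IOPTEs point at, then
 * allocate the TSB (IOMMU page table) itself, all on the requested
 * NUMA node.
 */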
int iommu_table_init(struct iommu *iommu, int tsbsize,
		     u32 dma_offset, u32 dma_addr_mask,
		     int numa_node)
{
	unsigned long i, order, sz, num_tsb_entries;
	struct page *page;

	num_tsb_entries = tsbsize / sizeof(iopte_t);

	/* Setup initial software IOMMU state. */
	spin_lock_init(&iommu->lock);
	iommu->ctx_lowest_free = 1;
	iommu->tbl.table_map_base = dma_offset;
	iommu->dma_addr_mask = dma_addr_mask;

	/* Allocate and initialize the free area map. */
	sz = num_tsb_entries / 8;
	sz = (sz + 7UL) & ~7UL;
	iommu->tbl.map = kzalloc_node(sz, GFP_KERNEL, numa_node);
	if (!iommu->tbl.map)
		return -ENOMEM;

	iommu_tbl_pool_init(&iommu->tbl, num_tsb_entries, IO_PAGE_SHIFT,
			    (tlb_type != hypervisor ? iommu_flushall : NULL),
			    false, 1, false);

	/* Allocate and initialize the dummy page which we
	 * set inactive IO PTEs to point to.
	 */
	page = alloc_pages_node(numa_node, GFP_KERNEL, 0);
	if (!page) {
		printk(KERN_ERR "IOMMU: Error, gfp(dummy_page) failed.\n");
		goto out_free_map;
	}
	iommu->dummy_page = (unsigned long) page_address(page);
	memset((void *)iommu->dummy_page, 0, PAGE_SIZE);
	iommu->dummy_page_pa = (unsigned long) __pa(iommu->dummy_page);

	/* Now allocate and setup the IOMMU page table itself. */
	order = get_order(tsbsize);
	page = alloc_pages_node(numa_node, GFP_KERNEL, order);
	if (!page) {
		printk(KERN_ERR "IOMMU: Error, gfp(tsb) failed.\n");
		goto out_free_dummy_page;
	}
	iommu->page_table = (iopte_t *)page_address(page);

	for (i = 0; i < num_tsb_entries; i++)
		iopte_make_dummy(iommu, &iommu->page_table[i]);

	return 0;

out_free_dummy_page:
	free_page(iommu->dummy_page);
	iommu->dummy_page = 0UL;

out_free_map:
	kfree(iommu->tbl.map);
	iommu->tbl.map = NULL;

	return -ENOMEM;
}

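/* Allocate a contiguous range of npages IOMMU entries and return a
 * pointer to the first IOPTE, or NULL on failure.
 */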
static inline iopte_t *alloc_npages(struct device *dev,
				    struct iommu *iommu,
				    unsigned long npages)
{
	unsigned long entry;

	entry = iommu_tbl_range_alloc(dev, &iommu->tbl, npages, NULL,
				      (unsigned long)(-1), 0);
	if (unlikely(entry == IOMMU_ERROR_CODE))
		return NULL;

	return iommu->page_table + entry;
}

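/* Allocate a DMA context number, searching upward from the lowest
 * known free context and wrapping around once before giving up and
 * falling back to context 0 (no context).
 */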
static int iommu_alloc_ctx(struct iommu *iommu)
{
	int lowest = iommu->ctx_lowest_free;
	int n = find_next_zero_bit(iommu->ctx_bitmap, IOMMU_NUM_CTXS, lowest);

	if (unlikely(n == IOMMU_NUM_CTXS)) {
		n = find_next_zero_bit(iommu->ctx_bitmap, lowest, 1);
		if (unlikely(n == lowest)) {
			printk(KERN_WARNING "IOMMU: Ran out of contexts.\n");
			n = 0;
		}
	}
	if (n)
		__set_bit(n, iommu->ctx_bitmap);

	return n;
}

static inline void iommu_free_ctx(struct iommu *iommu, int ctx)
{
	if (likely(ctx)) {
		__clear_bit(ctx, iommu->ctx_bitmap);
		if (ctx < iommu->ctx_lowest_free)
			iommu->ctx_lowest_free = ctx;
	}
}

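/* Allocate and map a physically contiguous, zeroed DMA-coherent buffer.
 * The pages are entered into the IOMMU with consistent (cached,
 * non-streaming) IOPTEs and the IOMMU bus address is returned through
 * dma_addrp.
 */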
static void *dma_4u_alloc_coherent(struct device *dev, size_t size,
				   dma_addr_t *dma_addrp, gfp_t gfp,
				   unsigned long attrs)
{
	unsigned long order, first_page;
	struct iommu *iommu;
	struct page *page;
	int npages, nid;
	iopte_t *iopte;
	void *ret;

	size = IO_PAGE_ALIGN(size);
	order = get_order(size);
	if (order >= 10)
		return NULL;

	nid = dev->archdata.numa_node;
	page = alloc_pages_node(nid, gfp, order);
	if (unlikely(!page))
		return NULL;

	first_page = (unsigned long) page_address(page);
	memset((char *)first_page, 0, PAGE_SIZE << order);

	iommu = dev->archdata.iommu;

	iopte = alloc_npages(dev, iommu, size >> IO_PAGE_SHIFT);

	if (unlikely(iopte == NULL)) {
		free_pages(first_page, order);
		return NULL;
	}

	*dma_addrp = (iommu->tbl.table_map_base +
		      ((iopte - iommu->page_table) << IO_PAGE_SHIFT));
	ret = (void *) first_page;
	npages = size >> IO_PAGE_SHIFT;
	first_page = __pa(first_page);
	while (npages--) {
		iopte_val(*iopte) = (IOPTE_CONSISTENT(0UL) |
				     IOPTE_WRITE |
				     (first_page & IOPTE_PAGE));
		iopte++;
		first_page += IO_PAGE_SIZE;
	}

	return ret;
}

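/* Release a DMA-coherent buffer: free its IOMMU entries, then the
 * underlying pages.
 */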
static void dma_4u_free_coherent(struct device *dev, size_t size,
				 void *cpu, dma_addr_t dvma,
				 unsigned long attrs)
{
	struct iommu *iommu;
	unsigned long order, npages;

	npages = IO_PAGE_ALIGN(size) >> IO_PAGE_SHIFT;
	iommu = dev->archdata.iommu;

	iommu_tbl_range_free(&iommu->tbl, dvma, npages, IOMMU_ERROR_CODE);

	order = get_order(size);
	if (order < 10)
		free_pages((unsigned long)cpu, order);
}

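/* Map a single page (or part of one) for streaming DMA and return the
 * IOMMU bus address.  Streaming IOPTEs are used when the streaming
 * buffer is enabled, and a DMA context is allocated if the IOMMU
 * supports context flushing.
 */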
static dma_addr_t dma_4u_map_page(struct device *dev, struct page *page,
				  unsigned long offset, size_t sz,
				  enum dma_data_direction direction,
				  unsigned long attrs)
{
	struct iommu *iommu;
	struct strbuf *strbuf;
	iopte_t *base;
	unsigned long flags, npages, oaddr;
	unsigned long i, base_paddr, ctx;
	u32 bus_addr, ret;
	unsigned long iopte_protection;

	iommu = dev->archdata.iommu;
	strbuf = dev->archdata.stc;

	if (unlikely(direction == DMA_NONE))
		goto bad_no_ctx;

	oaddr = (unsigned long)(page_address(page) + offset);
	npages = IO_PAGE_ALIGN(oaddr + sz) - (oaddr & IO_PAGE_MASK);
	npages >>= IO_PAGE_SHIFT;

	base = alloc_npages(dev, iommu, npages);
	spin_lock_irqsave(&iommu->lock, flags);
	ctx = 0;
	if (iommu->iommu_ctxflush)
		ctx = iommu_alloc_ctx(iommu);
	spin_unlock_irqrestore(&iommu->lock, flags);

	if (unlikely(!base))
		goto bad;

	bus_addr = (iommu->tbl.table_map_base +
		    ((base - iommu->page_table) << IO_PAGE_SHIFT));
	ret = bus_addr | (oaddr & ~IO_PAGE_MASK);
	base_paddr = __pa(oaddr & IO_PAGE_MASK);
	if (strbuf->strbuf_enabled)
		iopte_protection = IOPTE_STREAMING(ctx);
	else
		iopte_protection = IOPTE_CONSISTENT(ctx);
	if (direction != DMA_TO_DEVICE)
		iopte_protection |= IOPTE_WRITE;

	for (i = 0; i < npages; i++, base++, base_paddr += IO_PAGE_SIZE)
		iopte_val(*base) = iopte_protection | base_paddr;

	return ret;

bad:
	iommu_free_ctx(iommu, ctx);
bad_no_ctx:
	if (printk_ratelimit())
		WARN_ON(1);
	return DMA_MAPPING_ERROR;
}

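/* Flush the streaming buffer for a range of IOMMU pages.  A context
 * flush is used when both the streaming cache and the IOMMU support it,
 * otherwise each page is flushed individually; for any direction other
 * than DMA_TO_DEVICE the flush is then synchronized via the flush flag.
 */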
static void strbuf_flush(struct strbuf *strbuf, struct iommu *iommu,
			 u32 vaddr, unsigned long ctx, unsigned long npages,
			 enum dma_data_direction direction)
{
	int limit;

	if (strbuf->strbuf_ctxflush &&
	    iommu->iommu_ctxflush) {
		unsigned long matchreg, flushreg;
		u64 val;

		flushreg = strbuf->strbuf_ctxflush;
		matchreg = STC_CTXMATCH_ADDR(strbuf, ctx);

		iommu_write(flushreg, ctx);
		val = iommu_read(matchreg);
		val &= 0xffff;
		if (!val)
			goto do_flush_sync;

		while (val) {
			if (val & 0x1)
				iommu_write(flushreg, ctx);
			val >>= 1;
		}
		val = iommu_read(matchreg);
		if (unlikely(val)) {
			printk(KERN_WARNING "strbuf_flush: ctx flush "
			       "timeout matchreg[%llx] ctx[%lx]\n",
			       val, ctx);
			goto do_page_flush;
		}
	} else {
		unsigned long i;

	do_page_flush:
		for (i = 0; i < npages; i++, vaddr += IO_PAGE_SIZE)
			iommu_write(strbuf->strbuf_pflush, vaddr);
	}

do_flush_sync:
	/* If the device could not have possibly put dirty data into
	 * the streaming cache, no flush-flag synchronization needs
	 * to be performed.
	 */
	if (direction == DMA_TO_DEVICE)
		return;

	STC_FLUSHFLAG_INIT(strbuf);
	iommu_write(strbuf->strbuf_fsync, strbuf->strbuf_flushflag_pa);
	(void) iommu_read(iommu->write_complete_reg);

	limit = 100000;
	while (!STC_FLUSHFLAG_SET(strbuf)) {
		limit--;
		if (!limit)
			break;
		udelay(1);
		rmb();
	}
	if (!limit)
		printk(KERN_WARNING "strbuf_flush: flushflag timeout "
		       "vaddr[%08x] ctx[%lx] npages[%ld]\n",
		       vaddr, ctx, npages);
}

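/* Tear down a single-page streaming mapping: flush the streaming buffer
 * if required, repoint the IOPTEs at the dummy page, release the DMA
 * context and return the range to the allocator.
 */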
static void dma_4u_unmap_page(struct device *dev, dma_addr_t bus_addr,
			      size_t sz, enum dma_data_direction direction,
			      unsigned long attrs)
{
	struct iommu *iommu;
	struct strbuf *strbuf;
	iopte_t *base;
	unsigned long flags, npages, ctx, i;

	if (unlikely(direction == DMA_NONE)) {
		if (printk_ratelimit())
			WARN_ON(1);
		return;
	}

	iommu = dev->archdata.iommu;
	strbuf = dev->archdata.stc;

	npages = IO_PAGE_ALIGN(bus_addr + sz) - (bus_addr & IO_PAGE_MASK);
	npages >>= IO_PAGE_SHIFT;
	base = iommu->page_table +
		((bus_addr - iommu->tbl.table_map_base) >> IO_PAGE_SHIFT);
	bus_addr &= IO_PAGE_MASK;

	spin_lock_irqsave(&iommu->lock, flags);

	/* Record the context, if any. */
	ctx = 0;
	if (iommu->iommu_ctxflush)
		ctx = (iopte_val(*base) & IOPTE_CONTEXT) >> 47UL;

	/* Step 1: Kick data out of streaming buffers if necessary. */
	if (strbuf->strbuf_enabled && !(attrs & DMA_ATTR_SKIP_CPU_SYNC))
		strbuf_flush(strbuf, iommu, bus_addr, ctx,
			     npages, direction);

	/* Step 2: Clear out TSB entries. */
	for (i = 0; i < npages; i++)
		iopte_make_dummy(iommu, base + i);

	iommu_free_ctx(iommu, ctx);
	spin_unlock_irqrestore(&iommu->lock, flags);

	iommu_tbl_range_free(&iommu->tbl, bus_addr, npages, IOMMU_ERROR_CODE);
}

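/* Map a scatterlist for streaming DMA.  Each segment gets its own IOMMU
 * allocation, and adjacent segments are merged into one DMA segment when
 * their bus addresses are contiguous and neither the maximum segment
 * size nor the segment boundary would be violated.  Returns the number
 * of DMA segments produced, or 0 on failure.
 */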
static int dma_4u_map_sg(struct device *dev, struct scatterlist *sglist,
			 int nelems, enum dma_data_direction direction,
			 unsigned long attrs)
{
	struct scatterlist *s, *outs, *segstart;
	unsigned long flags, handle, prot, ctx;
	dma_addr_t dma_next = 0, dma_addr;
	unsigned int max_seg_size;
	unsigned long seg_boundary_size;
	int outcount, incount, i;
	struct strbuf *strbuf;
	struct iommu *iommu;
	unsigned long base_shift;

	BUG_ON(direction == DMA_NONE);

	iommu = dev->archdata.iommu;
	strbuf = dev->archdata.stc;
	if (nelems == 0 || !iommu)
		return 0;

	spin_lock_irqsave(&iommu->lock, flags);

	ctx = 0;
	if (iommu->iommu_ctxflush)
		ctx = iommu_alloc_ctx(iommu);

	if (strbuf->strbuf_enabled)
		prot = IOPTE_STREAMING(ctx);
	else
		prot = IOPTE_CONSISTENT(ctx);
	if (direction != DMA_TO_DEVICE)
		prot |= IOPTE_WRITE;

	outs = s = segstart = &sglist[0];
	outcount = 1;
	incount = nelems;
	handle = 0;

	/* Init first segment length for backout at failure */
	outs->dma_length = 0;

	max_seg_size = dma_get_max_seg_size(dev);
	seg_boundary_size = dma_get_seg_boundary_nr_pages(dev, IO_PAGE_SHIFT);
	base_shift = iommu->tbl.table_map_base >> IO_PAGE_SHIFT;
	for_each_sg(sglist, s, nelems, i) {
		unsigned long paddr, npages, entry, out_entry = 0, slen;
		iopte_t *base;

		slen = s->length;
		/* Sanity check */
		if (slen == 0) {
			dma_next = 0;
			continue;
		}
		/* Allocate iommu entries for that segment */
		paddr = (unsigned long) SG_ENT_PHYS_ADDRESS(s);
		npages = iommu_num_pages(paddr, slen, IO_PAGE_SIZE);
		entry = iommu_tbl_range_alloc(dev, &iommu->tbl, npages,
					      &handle, (unsigned long)(-1), 0);

		/* Handle failure */
		if (unlikely(entry == IOMMU_ERROR_CODE)) {
			if (printk_ratelimit())
				printk(KERN_INFO "iommu_alloc failed, iommu %p paddr %lx"
				       " npages %lx\n", iommu, paddr, npages);
			goto iommu_map_failed;
		}

		base = iommu->page_table + entry;

		/* Convert entry to a dma_addr_t */
		dma_addr = iommu->tbl.table_map_base +
			(entry << IO_PAGE_SHIFT);
		dma_addr |= (s->offset & ~IO_PAGE_MASK);

		/* Insert into HW table */
		paddr &= IO_PAGE_MASK;
		while (npages--) {
			iopte_val(*base) = prot | paddr;
			base++;
			paddr += IO_PAGE_SIZE;
		}

		/* If we are in an open segment, try merging */
		if (segstart != s) {
			/* We cannot merge if:
			 * - allocated dma_addr isn't contiguous to previous allocation
			 */
			if ((dma_addr != dma_next) ||
			    (outs->dma_length + s->length > max_seg_size) ||
			    (is_span_boundary(out_entry, base_shift,
					      seg_boundary_size, outs, s))) {
				/* Can't merge: create a new segment */
				segstart = s;
				outcount++;
				outs = sg_next(outs);
			} else {
				outs->dma_length += s->length;
			}
		}

		if (segstart == s) {
			/* This is a new segment, fill entries */
			outs->dma_address = dma_addr;
			outs->dma_length = slen;
			out_entry = entry;
		}

		/* Calculate next page pointer for contiguous check */
		dma_next = dma_addr + slen;
	}

	spin_unlock_irqrestore(&iommu->lock, flags);

	if (outcount < incount) {
		outs = sg_next(outs);
		outs->dma_address = DMA_MAPPING_ERROR;
		outs->dma_length = 0;
	}

	return outcount;

iommu_map_failed:
	for_each_sg(sglist, s, nelems, i) {
		if (s->dma_length != 0) {
			unsigned long vaddr, npages, entry, j;
			iopte_t *base;

			vaddr = s->dma_address & IO_PAGE_MASK;
			npages = iommu_num_pages(s->dma_address, s->dma_length,
						 IO_PAGE_SIZE);

			entry = (vaddr - iommu->tbl.table_map_base)
				>> IO_PAGE_SHIFT;
			base = iommu->page_table + entry;

			for (j = 0; j < npages; j++)
				iopte_make_dummy(iommu, base + j);

			iommu_tbl_range_free(&iommu->tbl, vaddr, npages,
					     IOMMU_ERROR_CODE);

			s->dma_address = DMA_MAPPING_ERROR;
			s->dma_length = 0;
		}
		if (s == outs)
			break;
	}
	spin_unlock_irqrestore(&iommu->lock, flags);

	return 0;
}

/* If contexts are being used, they are the same in all of the mappings
 * we make for a particular SG.
 */
static unsigned long fetch_sg_ctx(struct iommu *iommu, struct scatterlist *sg)
{
	unsigned long ctx = 0;

	if (iommu->iommu_ctxflush) {
		iopte_t *base;
		u32 bus_addr;
		struct iommu_map_table *tbl = &iommu->tbl;

		bus_addr = sg->dma_address & IO_PAGE_MASK;
		base = iommu->page_table +
			((bus_addr - tbl->table_map_base) >> IO_PAGE_SHIFT);

		ctx = (iopte_val(*base) & IOPTE_CONTEXT) >> 47UL;
	}
	return ctx;
}

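/* Unmap a scatterlist: walk the DMA segments until the first zero-length
 * entry, flushing the streaming buffer and releasing the IOMMU entries
 * for each one, then free the shared DMA context.
 */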
static void dma_4u_unmap_sg(struct device *dev, struct scatterlist *sglist,
			    int nelems, enum dma_data_direction direction,
			    unsigned long attrs)
{
	unsigned long flags, ctx;
	struct scatterlist *sg;
	struct strbuf *strbuf;
	struct iommu *iommu;

	BUG_ON(direction == DMA_NONE);

	iommu = dev->archdata.iommu;
	strbuf = dev->archdata.stc;

	ctx = fetch_sg_ctx(iommu, sglist);

	spin_lock_irqsave(&iommu->lock, flags);

	sg = sglist;
	while (nelems--) {
		dma_addr_t dma_handle = sg->dma_address;
		unsigned int len = sg->dma_length;
		unsigned long npages, entry;
		iopte_t *base;
		int i;

		if (!len)
			break;
		npages = iommu_num_pages(dma_handle, len, IO_PAGE_SIZE);

		entry = ((dma_handle - iommu->tbl.table_map_base)
			 >> IO_PAGE_SHIFT);
		base = iommu->page_table + entry;

		dma_handle &= IO_PAGE_MASK;
		if (strbuf->strbuf_enabled && !(attrs & DMA_ATTR_SKIP_CPU_SYNC))
			strbuf_flush(strbuf, iommu, dma_handle, ctx,
				     npages, direction);

		for (i = 0; i < npages; i++)
			iopte_make_dummy(iommu, base + i);

		iommu_tbl_range_free(&iommu->tbl, dma_handle, npages,
				     IOMMU_ERROR_CODE);
		sg = sg_next(sg);
	}

	iommu_free_ctx(iommu, ctx);

	spin_unlock_irqrestore(&iommu->lock, flags);
}

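/* Make a single streaming mapping visible to the CPU by flushing the
 * streaming buffer for the affected pages.  Nothing to do when the
 * streaming buffer is disabled.
 */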
static void dma_4u_sync_single_for_cpu(struct device *dev,
				       dma_addr_t bus_addr, size_t sz,
				       enum dma_data_direction direction)
{
	struct iommu *iommu;
	struct strbuf *strbuf;
	unsigned long flags, ctx, npages;

	iommu = dev->archdata.iommu;
	strbuf = dev->archdata.stc;

	if (!strbuf->strbuf_enabled)
		return;

	spin_lock_irqsave(&iommu->lock, flags);

	npages = IO_PAGE_ALIGN(bus_addr + sz) - (bus_addr & IO_PAGE_MASK);
	npages >>= IO_PAGE_SHIFT;
	bus_addr &= IO_PAGE_MASK;

	/* Step 1: Record the context, if any. */
	ctx = 0;
	if (iommu->iommu_ctxflush &&
	    strbuf->strbuf_ctxflush) {
		iopte_t *iopte;
		struct iommu_map_table *tbl = &iommu->tbl;

		iopte = iommu->page_table +
			((bus_addr - tbl->table_map_base) >> IO_PAGE_SHIFT);
		ctx = (iopte_val(*iopte) & IOPTE_CONTEXT) >> 47UL;
	}

	/* Step 2: Kick data out of streaming buffers. */
	strbuf_flush(strbuf, iommu, bus_addr, ctx, npages, direction);

	spin_unlock_irqrestore(&iommu->lock, flags);
}

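/* Make a mapped scatterlist visible to the CPU: flush the streaming
 * buffer over the whole bus-address range covered by the list, from the
 * first segment through the last one with a non-zero DMA length.
 */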
static void dma_4u_sync_sg_for_cpu(struct device *dev,
				   struct scatterlist *sglist, int nelems,
				   enum dma_data_direction direction)
{
	struct iommu *iommu;
	struct strbuf *strbuf;
	unsigned long flags, ctx, npages, i;
	struct scatterlist *sg, *sgprv;
	u32 bus_addr;

	iommu = dev->archdata.iommu;
	strbuf = dev->archdata.stc;

	if (!strbuf->strbuf_enabled)
		return;

	spin_lock_irqsave(&iommu->lock, flags);

	/* Step 1: Record the context, if any. */
	ctx = 0;
	if (iommu->iommu_ctxflush &&
	    strbuf->strbuf_ctxflush) {
		iopte_t *iopte;
		struct iommu_map_table *tbl = &iommu->tbl;

		iopte = iommu->page_table + ((sglist[0].dma_address -
			tbl->table_map_base) >> IO_PAGE_SHIFT);
		ctx = (iopte_val(*iopte) & IOPTE_CONTEXT) >> 47UL;
	}

	/* Step 2: Kick data out of streaming buffers. */
	bus_addr = sglist[0].dma_address & IO_PAGE_MASK;
	sgprv = NULL;
	for_each_sg(sglist, sg, nelems, i) {
		if (sg->dma_length == 0)
			break;
		sgprv = sg;
	}

	npages = (IO_PAGE_ALIGN(sgprv->dma_address + sgprv->dma_length)
		  - bus_addr) >> IO_PAGE_SHIFT;
	strbuf_flush(strbuf, iommu, bus_addr, ctx, npages, direction);

	spin_unlock_irqrestore(&iommu->lock, flags);
}

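/* Report whether a device DMA mask can be satisfied by this IOMMU; the
 * mask must cover the whole IOMMU DMA address range, with a special-case
 * exception for the ALi sound DMA quirk.
 */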
static int dma_4u_supported(struct device *dev, u64 device_mask)
{
	struct iommu *iommu = dev->archdata.iommu;

	if (ali_sound_dma_hack(dev, device_mask))
		return 1;

	if (device_mask < iommu->dma_addr_mask)
		return 0;
	return 1;
}

static const struct dma_map_ops sun4u_dma_ops = {
	.alloc			= dma_4u_alloc_coherent,
	.free			= dma_4u_free_coherent,
	.map_page		= dma_4u_map_page,
	.unmap_page		= dma_4u_unmap_page,
	.map_sg			= dma_4u_map_sg,
	.unmap_sg		= dma_4u_unmap_sg,
	.sync_single_for_cpu	= dma_4u_sync_single_for_cpu,
	.sync_sg_for_cpu	= dma_4u_sync_sg_for_cpu,
	.dma_supported		= dma_4u_supported,
};

const struct dma_map_ops *dma_ops = &sun4u_dma_ops;
EXPORT_SYMBOL(dma_ops);