// SPDX-License-Identifier: GPL-2.0
/*
 * iommu.c: IOMMU specific routines for memory management.
 *
 * Copyright (C) 1995 David S. Miller (davem@caip.rutgers.edu)
 * Copyright (C) 1995,2002 Pete Zaitcev (zaitcev@yahoo.com)
 * Copyright (C) 1996 Eddie C. Dost (ecd@skynet.be)
 * Copyright (C) 1997,1998 Jakub Jelinek (jj@sunsite.mff.cuni.cz)
 */

#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/dma-map-ops.h>
#include <linux/of.h>
#include <linux/of_device.h>

#include <asm/io.h>
#include <asm/mxcc.h>
#include <asm/mbus.h>
#include <asm/cacheflush.h>
#include <asm/tlbflush.h>
#include <asm/bitext.h>
#include <asm/iommu.h>
#include <asm/dma.h>

#include "mm_32.h"

/*
 * This could be sized dynamically, but we will do that only once we
 * have guidance about actual I/O pressures.
 */
#define IOMMU_RNGE	IOMMU_RNGE_256MB
#define IOMMU_START	0xF0000000
#define IOMMU_WINSIZE	(256*1024*1024U)
#define IOMMU_NPTES	(IOMMU_WINSIZE/PAGE_SIZE)	/* 64K PTEs, 256KB */
#define IOMMU_ORDER	6				/* 4096 * (1<<6) */
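/*
 * With the 4K PAGE_SIZE used on sparc32 this works out to
 * 256MB / 4K = 65536 IOPTEs; at sizeof(iopte_t) == 4 the table
 * occupies 256KB, i.e. 64 pages, hence the order-6 allocation below.
 */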

static int viking_flush;
/* viking.S */
extern void viking_flush_page(unsigned long page);
extern void viking_mxcc_flush_page(unsigned long page);

/*
 * Values precomputed according to CPU type.
 */
static unsigned int ioperm_noc;		/* Consistent mapping iopte flags */
static pgprot_t dvma_prot;		/* Consistent mapping pte flags */

#define IOPERM        (IOPTE_CACHE | IOPTE_WRITE | IOPTE_VALID)
#define MKIOPTE(pfn, perm) (((((pfn)<<8) & IOPTE_PAGE) | (perm)) & ~IOPTE_WAZ)
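/*
 * Bits 8 and up of an IOPTE hold the physical page number (IOPTE_PAGE)
 * and the low bits hold the cacheable/writable/valid flags; MKIOPTE
 * also clears the IOPTE_WAZ ("write as zero") bits, which the hardware
 * expects to stay clear.
 */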

static const struct dma_map_ops sbus_iommu_dma_gflush_ops;
static const struct dma_map_ops sbus_iommu_dma_pflush_ops;

static void __init sbus_iommu_init(struct platform_device *op)
{
	struct iommu_struct *iommu;
	unsigned int impl, vers;
	unsigned long *bitmap;
	unsigned long control;
	unsigned long base;
	unsigned long tmp;

	iommu = kmalloc(sizeof(struct iommu_struct), GFP_KERNEL);
	if (!iommu) {
		prom_printf("Unable to allocate iommu structure\n");
		prom_halt();
	}

	iommu->regs = of_ioremap(&op->resource[0], 0, PAGE_SIZE * 3,
				 "iommu_regs");
	if (!iommu->regs) {
		prom_printf("Cannot map IOMMU registers\n");
		prom_halt();
	}

	control = sbus_readl(&iommu->regs->control);
	impl = (control & IOMMU_CTRL_IMPL) >> 28;
	vers = (control & IOMMU_CTRL_VERS) >> 24;
	control &= ~(IOMMU_CTRL_RNGE);
	control |= (IOMMU_RNGE_256MB | IOMMU_CTRL_ENAB);
	sbus_writel(control, &iommu->regs->control);

	iommu_invalidate(iommu->regs);
	iommu->start = IOMMU_START;
	iommu->end = 0xffffffff;

	/* Allocate IOMMU page table */
	/* The hardware imposes awkward alignment constraints: the table
	   needs a 256K, 512K, 1M or 2M area aligned to its own size,
	   and the page allocator happens to give us exactly that. */
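	/*
	 * An order-N buddy allocation is naturally aligned to
	 * PAGE_SIZE << N, so the order-6 (256K) block requested below
	 * is already aligned to its size.
	 */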
	tmp = __get_free_pages(GFP_KERNEL, IOMMU_ORDER);
	if (!tmp) {
		prom_printf("Unable to allocate iommu table [0x%lx]\n",
			    IOMMU_NPTES * sizeof(iopte_t));
		prom_halt();
	}
	iommu->page_table = (iopte_t *)tmp;

	/* Initialize new table. */
	memset(iommu->page_table, 0, IOMMU_NPTES*sizeof(iopte_t));
	flush_cache_all();
	flush_tlb_all();

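	/*
	 * The base register holds the table's physical address shifted
	 * right by four bits; the table's natural alignment guarantees
	 * the bits dropped here are zero.
	 */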
	base = __pa((unsigned long)iommu->page_table) >> 4;
	sbus_writel(base, &iommu->regs->base);
	iommu_invalidate(iommu->regs);

	bitmap = kmalloc(IOMMU_NPTES>>3, GFP_KERNEL);
	if (!bitmap) {
		prom_printf("Unable to allocate iommu bitmap [%d]\n",
			    (int)(IOMMU_NPTES>>3));
		prom_halt();
	}
	bit_map_init(&iommu->usemap, bitmap, IOMMU_NPTES);
	/* To be coherent on HyperSparc, the page color of DVMA
	 * and physical addresses must match.
	 */
	if (srmmu_modtype == HyperSparc)
		iommu->usemap.num_colors = vac_cache_size >> PAGE_SHIFT;
	else
		iommu->usemap.num_colors = 1;
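	/*
	 * e.g. a 256K virtually indexed cache with 4K pages gives 64
	 * colors; DVMA addresses are then handed out so that they index
	 * the same cache lines as the backing physical pages.
	 */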

	printk(KERN_INFO "IOMMU: impl %d vers %d table 0x%p[%d B] map [%d b]\n",
	       impl, vers, iommu->page_table,
	       (int)(IOMMU_NPTES*sizeof(iopte_t)), (int)IOMMU_NPTES);

	op->dev.archdata.iommu = iommu;

	if (flush_page_for_dma_global)
		op->dev.dma_ops = &sbus_iommu_dma_gflush_ops;
	else
		op->dev.dma_ops = &sbus_iommu_dma_pflush_ops;
}

static int __init iommu_init(void)
{
	struct device_node *dp;

	for_each_node_by_name(dp, "iommu") {
		struct platform_device *op = of_find_device_by_node(dp);

		sbus_iommu_init(op);
		of_propagate_archdata(op);
	}

	return 0;
}

subsys_initcall(iommu_init);

/*
 * Write modified IOPTEs back to RAM.  The IOMMU fetches its page table
 * straight from physical memory, so CPU cache lines covering the table
 * must be flushed before new entries become visible to it.
 * This could be better if we didn't have to flush whole pages.
 */
static void iommu_flush_iotlb(iopte_t *iopte, unsigned int niopte)
{
	unsigned long start;
	unsigned long end;

	start = (unsigned long)iopte;
	end = PAGE_ALIGN(start + niopte*sizeof(iopte_t));
	start &= PAGE_MASK;
	if (viking_mxcc_present) {
		while (start < end) {
			viking_mxcc_flush_page(start);
			start += PAGE_SIZE;
		}
	} else if (viking_flush) {
		while (start < end) {
			viking_flush_page(start);
			start += PAGE_SIZE;
		}
	} else {
		while (start < end) {
			__flush_page_to_ram(start);
			start += PAGE_SIZE;
		}
	}
}

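/*
 * Map [offset, offset + len) of @page for DVMA: optionally flush the CPU
 * cache for the covered pages, grab a run of IOPTEs whose starting index
 * is colored by the page frame number, fill them in, push them to RAM
 * and return the bus address inside the IOMMU window.
 */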
static dma_addr_t __sbus_iommu_map_page(struct device *dev, struct page *page,
		unsigned long offset, size_t len, bool per_page_flush)
{
	struct iommu_struct *iommu = dev->archdata.iommu;
	phys_addr_t paddr = page_to_phys(page) + offset;
	unsigned long off = paddr & ~PAGE_MASK;
	unsigned long npages = (off + len + PAGE_SIZE - 1) >> PAGE_SHIFT;
	unsigned long pfn = __phys_to_pfn(paddr);
	unsigned int busa, busa0;
	iopte_t *iopte, *iopte0;
	int ioptex, i;

	/* XXX So what is maxphys for us and how do drivers know it? */
	if (!len || len > 256 * 1024)
		return DMA_MAPPING_ERROR;

	/*
	 * We expect unmapped highmem pages not to be in the cache.
	 * XXX Is this a good assumption?
	 * XXX What if someone else unmaps it here and races us?
	 */
	if (per_page_flush && !PageHighMem(page)) {
		unsigned long vaddr, p;

		vaddr = (unsigned long)page_address(page) + offset;
		for (p = vaddr & PAGE_MASK; p < vaddr + len; p += PAGE_SIZE)
			flush_page_for_dma(p);
	}

	/* page color = pfn of page */
	ioptex = bit_map_string_get(&iommu->usemap, npages, pfn);
	if (ioptex < 0)
		panic("iommu out");
	busa0 = iommu->start + (ioptex << PAGE_SHIFT);
	iopte0 = &iommu->page_table[ioptex];

	busa = busa0;
	iopte = iopte0;
	for (i = 0; i < npages; i++) {
		iopte_val(*iopte) = MKIOPTE(pfn, IOPERM);
		iommu_invalidate_page(iommu->regs, busa);
		busa += PAGE_SIZE;
		iopte++;
		pfn++;
	}

	iommu_flush_iotlb(iopte0, npages);
	return busa0 + off;
}

static dma_addr_t sbus_iommu_map_page_gflush(struct device *dev,
		struct page *page, unsigned long offset, size_t len,
		enum dma_data_direction dir, unsigned long attrs)
{
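	/*
	 * The "gflush" variant is selected when flush_page_for_dma()
	 * flushes the whole cache regardless of its argument, so one
	 * call up front replaces the per-page flushing done in the
	 * pflush path.
	 */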
	flush_page_for_dma(0);
	return __sbus_iommu_map_page(dev, page, offset, len, false);
}

static dma_addr_t sbus_iommu_map_page_pflush(struct device *dev,
		struct page *page, unsigned long offset, size_t len,
		enum dma_data_direction dir, unsigned long attrs)
{
	return __sbus_iommu_map_page(dev, page, offset, len, true);
}

static int __sbus_iommu_map_sg(struct device *dev, struct scatterlist *sgl,
		int nents, enum dma_data_direction dir, unsigned long attrs,
		bool per_page_flush)
{
	struct scatterlist *sg;
	int j;

	for_each_sg(sgl, sg, nents, j) {
		sg->dma_address = __sbus_iommu_map_page(dev, sg_page(sg),
				sg->offset, sg->length, per_page_flush);
		if (sg->dma_address == DMA_MAPPING_ERROR)
			return 0;
		sg->dma_length = sg->length;
	}

	return nents;
}

static int sbus_iommu_map_sg_gflush(struct device *dev, struct scatterlist *sgl,
		int nents, enum dma_data_direction dir, unsigned long attrs)
{
	flush_page_for_dma(0);
	return __sbus_iommu_map_sg(dev, sgl, nents, dir, attrs, false);
}

static int sbus_iommu_map_sg_pflush(struct device *dev, struct scatterlist *sgl,
		int nents, enum dma_data_direction dir, unsigned long attrs)
{
	return __sbus_iommu_map_sg(dev, sgl, nents, dir, attrs, true);
}

static void sbus_iommu_unmap_page(struct device *dev, dma_addr_t dma_addr,
		size_t len, enum dma_data_direction dir, unsigned long attrs)
{
	struct iommu_struct *iommu = dev->archdata.iommu;
	unsigned int busa = dma_addr & PAGE_MASK;
	unsigned long off = dma_addr & ~PAGE_MASK;
	unsigned int npages = (off + len + PAGE_SIZE-1) >> PAGE_SHIFT;
	unsigned int ioptex = (busa - iommu->start) >> PAGE_SHIFT;
	unsigned int i;

	BUG_ON(busa < iommu->start);
	for (i = 0; i < npages; i++) {
		iopte_val(iommu->page_table[ioptex + i]) = 0;
		iommu_invalidate_page(iommu->regs, busa);
		busa += PAGE_SIZE;
	}
	bit_map_clear(&iommu->usemap, ioptex, npages);
}

static void sbus_iommu_unmap_sg(struct device *dev, struct scatterlist *sgl,
		int nents, enum dma_data_direction dir, unsigned long attrs)
{
	struct scatterlist *sg;
	int i;

	for_each_sg(sgl, sg, nents, i) {
		sbus_iommu_unmap_page(dev, sg->dma_address, sg->length, dir,
				      attrs);
		sg->dma_address = 0x21212121;
	}
}

#ifdef CONFIG_SBUS
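/*
 * Allocate a DMA-consistent buffer: back it with zeroed pages, remap
 * those pages into the sparc DVMA resource area with dvma_prot (which
 * may be uncacheable, see ld_mmu_iommu()), wire up IOPTEs for them and
 * hand the bus address back through *dma_handle.
 */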
static void *sbus_iommu_alloc(struct device *dev, size_t len,
		dma_addr_t *dma_handle, gfp_t gfp, unsigned long attrs)
{
	struct iommu_struct *iommu = dev->archdata.iommu;
	unsigned long va, addr, page, end, ret;
	iopte_t *iopte = iommu->page_table;
	iopte_t *first;
	int ioptex;

	/* XXX So what is maxphys for us and how do drivers know it? */
	if (!len || len > 256 * 1024)
		return NULL;

	len = PAGE_ALIGN(len);
	va = __get_free_pages(gfp | __GFP_ZERO, get_order(len));
	if (va == 0)
		return NULL;

	addr = ret = sparc_dma_alloc_resource(dev, len);
	if (!addr)
		goto out_free_pages;

	BUG_ON((va & ~PAGE_MASK) != 0);
	BUG_ON((addr & ~PAGE_MASK) != 0);
	BUG_ON((len & ~PAGE_MASK) != 0);

	/* page color = physical address */
	ioptex = bit_map_string_get(&iommu->usemap, len >> PAGE_SHIFT,
			addr >> PAGE_SHIFT);
	if (ioptex < 0)
		panic("iommu out");

	iopte += ioptex;
	first = iopte;
	end = addr + len;
	while (addr < end) {
		page = va;
		{
			pmd_t *pmdp;
			pte_t *ptep;

			if (viking_mxcc_present)
				viking_mxcc_flush_page(page);
			else if (viking_flush)
				viking_flush_page(page);
			else
				__flush_page_to_ram(page);

			pmdp = pmd_off_k(addr);
			ptep = pte_offset_map(pmdp, addr);

			set_pte(ptep, mk_pte(virt_to_page(page), dvma_prot));
		}
		iopte_val(*iopte++) =
			MKIOPTE(page_to_pfn(virt_to_page(page)), ioperm_noc);
		addr += PAGE_SIZE;
		va += PAGE_SIZE;
	}
	/* P3: why do we need this?
	 *
	 * DAVEM: Because there are several aspects, none of which
	 *        are handled by a single interface.  Some cpus are
	 *        completely not I/O DMA coherent, and some have
	 *        virtually indexed caches.  The driver DMA flushing
	 *        methods handle the former case, but here during
	 *        IOMMU page table modifications, and usage of non-cacheable
	 *        cpu mappings of pages potentially in the cpu caches, we have
	 *        to handle the latter case as well.
	 */
	flush_cache_all();
	iommu_flush_iotlb(first, len >> PAGE_SHIFT);
	flush_tlb_all();
	iommu_invalidate(iommu->regs);

	*dma_handle = iommu->start + (ioptex << PAGE_SHIFT);
	return (void *)ret;

out_free_pages:
	free_pages(va, get_order(len));
	return NULL;
}

static void sbus_iommu_free(struct device *dev, size_t len, void *cpu_addr,
		dma_addr_t busa, unsigned long attrs)
{
	struct iommu_struct *iommu = dev->archdata.iommu;
	iopte_t *iopte = iommu->page_table;
	struct page *page = virt_to_page(cpu_addr);
	int ioptex = (busa - iommu->start) >> PAGE_SHIFT;
	unsigned long end;

	if (!sparc_dma_free_resource(cpu_addr, len))
		return;

	BUG_ON((busa & ~PAGE_MASK) != 0);
	BUG_ON((len & ~PAGE_MASK) != 0);

	iopte += ioptex;
	end = busa + len;
	while (busa < end) {
		iopte_val(*iopte++) = 0;
		busa += PAGE_SIZE;
	}
	flush_tlb_all();
	iommu_invalidate(iommu->regs);
	bit_map_clear(&iommu->usemap, ioptex, len >> PAGE_SHIFT);

	__free_pages(page, get_order(len));
}
#endif

static const struct dma_map_ops sbus_iommu_dma_gflush_ops = {
#ifdef CONFIG_SBUS
	.alloc = sbus_iommu_alloc,
	.free = sbus_iommu_free,
#endif
	.map_page = sbus_iommu_map_page_gflush,
	.unmap_page = sbus_iommu_unmap_page,
	.map_sg = sbus_iommu_map_sg_gflush,
	.unmap_sg = sbus_iommu_unmap_sg,
};

static const struct dma_map_ops sbus_iommu_dma_pflush_ops = {
#ifdef CONFIG_SBUS
	.alloc = sbus_iommu_alloc,
	.free = sbus_iommu_free,
#endif
	.map_page = sbus_iommu_map_page_pflush,
	.unmap_page = sbus_iommu_unmap_page,
	.map_sg = sbus_iommu_map_sg_pflush,
	.unmap_sg = sbus_iommu_unmap_sg,
};

void __init ld_mmu_iommu(void)
{
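	/*
	 * Viking with an MXCC and HyperSparc can keep DVMA pages
	 * cacheable (HyperSparc relies on the page coloring done in
	 * sbus_iommu_init()); all other CPUs get uncacheable consistent
	 * mappings and non-cacheable IOPTEs.
	 */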
	if (viking_mxcc_present || srmmu_modtype == HyperSparc) {
		dvma_prot = __pgprot(SRMMU_CACHE | SRMMU_ET_PTE | SRMMU_PRIV);
		ioperm_noc = IOPTE_CACHE | IOPTE_WRITE | IOPTE_VALID;
	} else {
		dvma_prot = __pgprot(SRMMU_ET_PTE | SRMMU_PRIV);
		ioperm_noc = IOPTE_WRITE | IOPTE_VALID;
	}
}