^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1) // SPDX-License-Identifier: GPL-2.0
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3) * io-unit.c: IO-UNIT specific routines for memory management.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5) * Copyright (C) 1997,1998 Jakub Jelinek (jj@sunsite.mff.cuni.cz)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8) #include <linux/kernel.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9) #include <linux/init.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10) #include <linux/slab.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11) #include <linux/spinlock.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12) #include <linux/mm.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13) #include <linux/bitops.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14) #include <linux/dma-map-ops.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15) #include <linux/of.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 16) #include <linux/of_device.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 17)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 18) #include <asm/io.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 19) #include <asm/io-unit.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 20) #include <asm/mxcc.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 21) #include <asm/cacheflush.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 22) #include <asm/tlbflush.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 23) #include <asm/dma.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 24) #include <asm/oplib.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 25)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 26) #include "mm_32.h"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 27)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 28) /* #define IOUNIT_DEBUG */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 29) #ifdef IOUNIT_DEBUG
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 30) #define IOD(x) printk(x)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 31) #else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 32) #define IOD(x) do { } while (0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 33) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 34)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 35) #define IOPERM (IOUPTE_CACHE | IOUPTE_WRITE | IOUPTE_VALID)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 36) #define MKIOPTE(phys) __iopte((((phys)>>4) & IOUPTE_PAGE) | IOPERM)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 37)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 38) static const struct dma_map_ops iounit_dma_ops;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 39)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 40) static void __init iounit_iommu_init(struct platform_device *op)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 41) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 42) struct iounit_struct *iounit;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 43) iopte_t __iomem *xpt;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 44) iopte_t __iomem *xptend;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 45)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 46) iounit = kzalloc(sizeof(struct iounit_struct), GFP_ATOMIC);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 47) if (!iounit) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 48) prom_printf("SUN4D: Cannot alloc iounit, halting.\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 49) prom_halt();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 50) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 51)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 52) iounit->limit[0] = IOUNIT_BMAP1_START;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 53) iounit->limit[1] = IOUNIT_BMAP2_START;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 54) iounit->limit[2] = IOUNIT_BMAPM_START;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 55) iounit->limit[3] = IOUNIT_BMAPM_END;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 56) iounit->rotor[1] = IOUNIT_BMAP2_START;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 57) iounit->rotor[2] = IOUNIT_BMAPM_START;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 58)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 59) xpt = of_ioremap(&op->resource[2], 0, PAGE_SIZE * 16, "XPT");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 60) if (!xpt) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 61) prom_printf("SUN4D: Cannot map External Page Table.");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 62) prom_halt();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 63) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 64)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 65) op->dev.archdata.iommu = iounit;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 66) iounit->page_table = xpt;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 67) spin_lock_init(&iounit->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 68)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 69) xptend = iounit->page_table + (16 * PAGE_SIZE) / sizeof(iopte_t);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 70) for (; xpt < xptend; xpt++)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 71) sbus_writel(0, xpt);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 72)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 73) op->dev.dma_ops = &iounit_dma_ops;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 74) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 75)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 76) static int __init iounit_init(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 77) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 78) extern void sun4d_init_sbi_irq(void);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 79) struct device_node *dp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 80)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 81) for_each_node_by_name(dp, "sbi") {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 82) struct platform_device *op = of_find_device_by_node(dp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 83)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 84) iounit_iommu_init(op);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 85) of_propagate_archdata(op);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 86) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 87)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 88) sun4d_init_sbi_irq();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 89)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 90) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 91) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 92)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 93) subsys_initcall(iounit_init);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 94)
/*
 * Allocate a DVMA window covering [vaddr, vaddr + size) and install the
 * corresponding IOPTEs in the IO-UNIT page table.  Returns the DVMA
 * address (IOUNIT_DMA_BASE-relative) the device should use.
 * One has to hold iounit->lock to call this.
 */
static unsigned long iounit_get_area(struct iounit_struct *iounit, unsigned long vaddr, int size)
{
	int i, j, k, npages;
	unsigned long rotor, scan, limit;
	iopte_t iopte;

	/* Pages needed, including the sub-page offset of vaddr. */
	npages = ((vaddr & ~PAGE_MASK) + size + (PAGE_SIZE-1)) >> PAGE_SHIFT;

	/* A tiny bit of magic ingredient :)
	 * Each nibble of i is a bitmap-area index (1..3), consumed low
	 * nibble first; the nibble order picks which areas to try, and in
	 * what sequence, depending on the request size.
	 */
	switch (npages) {
	case 1: i = 0x0231; break;
	case 2: i = 0x0132; break;
	default: i = 0x0213; break;
	}

	IOD(("iounit_get_area(%08lx,%d[%d])=", vaddr, size, npages));

	/* Try area j (spanning limit[j-1]..limit[j]) starting at its rotor. */
next:	j = (i & 15);
	rotor = iounit->rotor[j - 1];
	limit = iounit->limit[j];
	scan = rotor;
nexti:	scan = find_next_zero_bit(iounit->bmap, limit, scan);
	if (scan + npages > limit) {
		if (limit != rotor) {
			/* Wrap once: rescan from the area start up to the rotor. */
			limit = rotor;
			scan = iounit->limit[j - 1];
			goto nexti;
		}
		/* This area is full; fall back to the next nibble of i. */
		i >>= 4;
		if (!(i & 15))
			panic("iounit_get_area: Couldn't find free iopte slots for (%08lx,%d)\n", vaddr, size);
		goto next;
	}
	/* Need npages consecutive free bits; restart the scan on any hit. */
	for (k = 1, scan++; k < npages; k++)
		if (test_bit(scan++, iounit->bmap))
			goto nexti;
	/* Advance the rotor past the allocation (or reset to area start). */
	iounit->rotor[j - 1] = (scan < limit) ? scan : iounit->limit[j - 1];
	scan -= npages;
	/* First IOPTE; each following page adds 0x100, i.e. one page step
	 * in MKIOPTE's phys>>4 address format. */
	iopte = MKIOPTE(__pa(vaddr & PAGE_MASK));
	vaddr = IOUNIT_DMA_BASE + (scan << PAGE_SHIFT) + (vaddr & ~PAGE_MASK);
	for (k = 0; k < npages; k++, iopte = __iopte(iopte_val(iopte) + 0x100), scan++) {
		set_bit(scan, iounit->bmap);
		sbus_writel(iopte_val(iopte), &iounit->page_table[scan]);
	}
	IOD(("%08lx\n", vaddr));
	return vaddr;
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 143)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 144) static dma_addr_t iounit_map_page(struct device *dev, struct page *page,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 145) unsigned long offset, size_t len, enum dma_data_direction dir,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 146) unsigned long attrs)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 147) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 148) void *vaddr = page_address(page) + offset;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 149) struct iounit_struct *iounit = dev->archdata.iommu;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 150) unsigned long ret, flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 151)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 152) /* XXX So what is maxphys for us and how do drivers know it? */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 153) if (!len || len > 256 * 1024)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 154) return DMA_MAPPING_ERROR;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 155)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 156) spin_lock_irqsave(&iounit->lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 157) ret = iounit_get_area(iounit, (unsigned long)vaddr, len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 158) spin_unlock_irqrestore(&iounit->lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 159) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 160) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 161)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 162) static int iounit_map_sg(struct device *dev, struct scatterlist *sgl, int nents,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 163) enum dma_data_direction dir, unsigned long attrs)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 164) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 165) struct iounit_struct *iounit = dev->archdata.iommu;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 166) struct scatterlist *sg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 167) unsigned long flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 168) int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 169)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 170) /* FIXME: Cache some resolved pages - often several sg entries are to the same page */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 171) spin_lock_irqsave(&iounit->lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 172) for_each_sg(sgl, sg, nents, i) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 173) sg->dma_address = iounit_get_area(iounit, (unsigned long) sg_virt(sg), sg->length);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 174) sg->dma_length = sg->length;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 175) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 176) spin_unlock_irqrestore(&iounit->lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 177) return nents;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 178) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 179)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 180) static void iounit_unmap_page(struct device *dev, dma_addr_t vaddr, size_t len,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 181) enum dma_data_direction dir, unsigned long attrs)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 182) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 183) struct iounit_struct *iounit = dev->archdata.iommu;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 184) unsigned long flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 185)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 186) spin_lock_irqsave(&iounit->lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 187) len = ((vaddr & ~PAGE_MASK) + len + (PAGE_SIZE-1)) >> PAGE_SHIFT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 188) vaddr = (vaddr - IOUNIT_DMA_BASE) >> PAGE_SHIFT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 189) IOD(("iounit_release %08lx-%08lx\n", (long)vaddr, (long)len+vaddr));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 190) for (len += vaddr; vaddr < len; vaddr++)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 191) clear_bit(vaddr, iounit->bmap);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 192) spin_unlock_irqrestore(&iounit->lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 193) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 194)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 195) static void iounit_unmap_sg(struct device *dev, struct scatterlist *sgl,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 196) int nents, enum dma_data_direction dir, unsigned long attrs)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 197) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 198) struct iounit_struct *iounit = dev->archdata.iommu;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 199) unsigned long flags, vaddr, len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 200) struct scatterlist *sg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 201) int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 202)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 203) spin_lock_irqsave(&iounit->lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 204) for_each_sg(sgl, sg, nents, i) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 205) len = ((sg->dma_address & ~PAGE_MASK) + sg->length + (PAGE_SIZE-1)) >> PAGE_SHIFT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 206) vaddr = (sg->dma_address - IOUNIT_DMA_BASE) >> PAGE_SHIFT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 207) IOD(("iounit_release %08lx-%08lx\n", (long)vaddr, (long)len+vaddr));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 208) for (len += vaddr; vaddr < len; vaddr++)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 209) clear_bit(vaddr, iounit->bmap);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 210) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 211) spin_unlock_irqrestore(&iounit->lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 212) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 213)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 214) #ifdef CONFIG_SBUS
/*
 * Allocate a coherent DMA buffer.  Gets zeroed pages, reserves a DVMA
 * virtual range via sparc_dma_alloc_resource(), then for each page
 * installs both an SRMMU PTE (so the CPU can reach the buffer at the
 * DVMA address) and the matching IO-UNIT IOPTE (so the device sees the
 * same address).  Returns the DVMA virtual address, which is also
 * stored in *dma_handle, or NULL on failure.
 */
static void *iounit_alloc(struct device *dev, size_t len,
		dma_addr_t *dma_handle, gfp_t gfp, unsigned long attrs)
{
	struct iounit_struct *iounit = dev->archdata.iommu;
	unsigned long va, addr, page, end, ret;
	pgprot_t dvma_prot;
	iopte_t __iomem *iopte;

	/* XXX So what is maxphys for us and how do drivers know it? */
	if (!len || len > 256 * 1024)
		return NULL;

	len = PAGE_ALIGN(len);
	va = __get_free_pages(gfp | __GFP_ZERO, get_order(len));
	if (!va)
		return NULL;

	/* The reserved DVMA range doubles as the device's dma handle. */
	addr = ret = sparc_dma_alloc_resource(dev, len);
	if (!addr)
		goto out_free_pages;
	*dma_handle = addr;

	/* Privileged, cacheable kernel mapping for the DVMA range. */
	dvma_prot = __pgprot(SRMMU_CACHE | SRMMU_ET_PTE | SRMMU_PRIV);
	end = PAGE_ALIGN((addr + len));
	while(addr < end) {
		page = va;
		{
			pmd_t *pmdp;
			pte_t *ptep;
			long i;

			pmdp = pmd_off_k(addr);
			/* NOTE(review): no pte_unmap() pairing this call —
			 * presumably harmless on sparc32; confirm. */
			ptep = pte_offset_map(pmdp, addr);

			/* CPU-side mapping of the backing page at addr. */
			set_pte(ptep, mk_pte(virt_to_page(page), dvma_prot));

			/* Index of addr's slot in the IO-UNIT page table. */
			i = ((addr - IOUNIT_DMA_BASE) >> PAGE_SHIFT);

			/* Device-side mapping of the same page. */
			iopte = iounit->page_table + i;
			sbus_writel(iopte_val(MKIOPTE(__pa(page))), iopte);
		}
		addr += PAGE_SIZE;
		va += PAGE_SIZE;
	}
	/* Make the new mappings visible before anyone uses the buffer. */
	flush_cache_all();
	flush_tlb_all();

	return (void *)ret;

out_free_pages:
	free_pages(va, get_order(len));
	return NULL;
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 268)
/*
 * Free a buffer from iounit_alloc().  Not implemented: the pages, the
 * DVMA resource, and the PTE/IOPTE mappings are all leaked.
 */
static void iounit_free(struct device *dev, size_t size, void *cpu_addr,
		dma_addr_t dma_addr, unsigned long attrs)
{
	/* XXX Somebody please fill this in */
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 274) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 275)
/* DMA operations for devices sitting behind a SUN4D IO-UNIT. */
static const struct dma_map_ops iounit_dma_ops = {
#ifdef CONFIG_SBUS
	.alloc = iounit_alloc,
	.free = iounit_free,
#endif
	.map_page = iounit_map_page,
	.unmap_page = iounit_unmap_page,
	.map_sg = iounit_map_sg,
	.unmap_sg = iounit_unmap_sg,
};