^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1) // SPDX-License-Identifier: GPL-2.0
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3) * Mips Jazz DMA controller support
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4) * Copyright (C) 1995, 1996 by Andreas Busse
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6) * NOTE: Some of the argument checking could be removed when
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7) * things have settled down. Also, instead of returning 0xffffffff
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8) * on failure of vdma_alloc() one could leave page #0 unused
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9) * and return the more usual NULL pointer as logical address.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11) #include <linux/kernel.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12) #include <linux/init.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13) #include <linux/export.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14) #include <linux/errno.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15) #include <linux/mm.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 16) #include <linux/memblock.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 17) #include <linux/spinlock.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 18) #include <linux/gfp.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 19) #include <linux/dma-map-ops.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 20) #include <asm/mipsregs.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 21) #include <asm/jazz.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 22) #include <asm/io.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 23) #include <linux/uaccess.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 24) #include <asm/dma.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 25) #include <asm/jazzdma.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 26)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 27) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 28) * Set this to one to enable additional vdma debug code.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 29) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 30) #define CONF_DEBUG_VDMA 0
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 31)
/*
 * The VDMA page table proper.  vdma_init() allocates it and then points
 * this at an uncached (KSEG1) mapping so table updates reach memory
 * without explicit cache flushes.
 */
static VDMA_PGTBL_ENTRY *pgtbl;

/* Serializes pagetable scans/updates in vdma_alloc(). */
static DEFINE_SPINLOCK(vdma_lock);

/*
 * Debug stuff
 */
/* Constant 0 unless CONF_DEBUG_VDMA is set, so debug code compiles away. */
#define vdma_debug ((CONF_DEBUG_VDMA) ? debuglvl : 0)

static int debuglvl = 3;	/* debug verbosity when CONF_DEBUG_VDMA=1 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 42)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 43) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 44) * Initialize the pagetable with a one-to-one mapping of
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 45) * the first 16 Mbytes of main memory and declare all
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 46) * entries to be unused. Using this method will at least
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 47) * allow some early device driver operations to work.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 48) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 49) static inline void vdma_pgtbl_init(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 50) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 51) unsigned long paddr = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 52) int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 53)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 54) for (i = 0; i < VDMA_PGTBL_ENTRIES; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 55) pgtbl[i].frame = paddr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 56) pgtbl[i].owner = VDMA_PAGE_EMPTY;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 57) paddr += VDMA_PAGESIZE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 58) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 59) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 60)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 61) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 62) * Initialize the Jazz R4030 dma controller
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 63) */
/*
 * Initialize the Jazz R4030 dma controller
 */
static int __init vdma_init(void)
{
	/*
	 * Allocate 32k of memory for DMA page tables. This needs to be page
	 * aligned and should be uncached to avoid cache flushing after every
	 * update.
	 */
	pgtbl = (VDMA_PGTBL_ENTRY *)__get_free_pages(GFP_KERNEL | GFP_DMA,
						     get_order(VDMA_PGTBL_SIZE));
	BUG_ON(!pgtbl);
	/*
	 * Push any cached lines to memory, then switch to the uncached
	 * KSEG1 alias of the same pages for all further accesses.
	 */
	dma_cache_wback_inv((unsigned long)pgtbl, VDMA_PGTBL_SIZE);
	pgtbl = (VDMA_PGTBL_ENTRY *)CKSEG1ADDR((unsigned long)pgtbl);

	/*
	 * Clear the R4030 translation table
	 */
	vdma_pgtbl_init();

	/* Tell the R4030 where the table lives (physical address) and
	 * how big it is, then invalidate its cached translations. */
	r4030_write_reg32(JAZZ_R4030_TRSTBL_BASE,
			  CPHYSADDR((unsigned long)pgtbl));
	r4030_write_reg32(JAZZ_R4030_TRSTBL_LIM, VDMA_PGTBL_SIZE);
	r4030_write_reg32(JAZZ_R4030_TRSTBL_INV, 0);

	printk(KERN_INFO "VDMA: R4030 DMA pagetables initialized.\n");
	return 0;
}
arch_initcall(vdma_init);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 91)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 92) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 93) * Allocate DMA pagetables using a simple first-fit algorithm
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 94) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 95) unsigned long vdma_alloc(unsigned long paddr, unsigned long size)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 96) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 97) int first, last, pages, frame, i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 98) unsigned long laddr, flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 99)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 100) /* check arguments */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 101)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 102) if (paddr > 0x1fffffff) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 103) if (vdma_debug)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 104) printk("vdma_alloc: Invalid physical address: %08lx\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 105) paddr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 106) return DMA_MAPPING_ERROR; /* invalid physical address */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 107) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 108) if (size > 0x400000 || size == 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 109) if (vdma_debug)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 110) printk("vdma_alloc: Invalid size: %08lx\n", size);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 111) return DMA_MAPPING_ERROR; /* invalid physical address */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 112) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 113)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 114) spin_lock_irqsave(&vdma_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 115) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 116) * Find free chunk
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 117) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 118) pages = VDMA_PAGE(paddr + size) - VDMA_PAGE(paddr) + 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 119) first = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 120) while (1) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 121) while (pgtbl[first].owner != VDMA_PAGE_EMPTY &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 122) first < VDMA_PGTBL_ENTRIES) first++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 123) if (first + pages > VDMA_PGTBL_ENTRIES) { /* nothing free */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 124) spin_unlock_irqrestore(&vdma_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 125) return DMA_MAPPING_ERROR;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 126) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 127)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 128) last = first + 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 129) while (pgtbl[last].owner == VDMA_PAGE_EMPTY
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 130) && last - first < pages)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 131) last++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 132)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 133) if (last - first == pages)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 134) break; /* found */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 135) first = last + 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 136) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 137)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 138) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 139) * Mark pages as allocated
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 140) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 141) laddr = (first << 12) + (paddr & (VDMA_PAGESIZE - 1));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 142) frame = paddr & ~(VDMA_PAGESIZE - 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 143)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 144) for (i = first; i < last; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 145) pgtbl[i].frame = frame;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 146) pgtbl[i].owner = laddr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 147) frame += VDMA_PAGESIZE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 148) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 149)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 150) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 151) * Update translation table and return logical start address
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 152) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 153) r4030_write_reg32(JAZZ_R4030_TRSTBL_INV, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 154)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 155) if (vdma_debug > 1)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 156) printk("vdma_alloc: Allocated %d pages starting from %08lx\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 157) pages, laddr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 158)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 159) if (vdma_debug > 2) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 160) printk("LADDR: ");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 161) for (i = first; i < last; i++)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 162) printk("%08x ", i << 12);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 163) printk("\nPADDR: ");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 164) for (i = first; i < last; i++)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 165) printk("%08x ", pgtbl[i].frame);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 166) printk("\nOWNER: ");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 167) for (i = first; i < last; i++)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 168) printk("%08x ", pgtbl[i].owner);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 169) printk("\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 170) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 171)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 172) spin_unlock_irqrestore(&vdma_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 173)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 174) return laddr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 175) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 176)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 177) EXPORT_SYMBOL(vdma_alloc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 178)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 179) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 180) * Free previously allocated dma translation pages
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 181) * Note that this does NOT change the translation table,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 182) * it just marks the free'd pages as unused!
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 183) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 184) int vdma_free(unsigned long laddr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 185) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 186) int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 187)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 188) i = laddr >> 12;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 189)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 190) if (pgtbl[i].owner != laddr) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 191) printk
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 192) ("vdma_free: trying to free other's dma pages, laddr=%8lx\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 193) laddr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 194) return -1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 195) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 196)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 197) while (i < VDMA_PGTBL_ENTRIES && pgtbl[i].owner == laddr) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 198) pgtbl[i].owner = VDMA_PAGE_EMPTY;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 199) i++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 200) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 201)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 202) if (vdma_debug > 1)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 203) printk("vdma_free: freed %ld pages starting from %08lx\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 204) i - (laddr >> 12), laddr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 205)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 206) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 207) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 208)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 209) EXPORT_SYMBOL(vdma_free);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 210)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 211) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 212) * Translate a physical address to a logical address.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 213) * This will return the logical address of the first
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 214) * match.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 215) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 216) unsigned long vdma_phys2log(unsigned long paddr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 217) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 218) int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 219) int frame;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 220)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 221) frame = paddr & ~(VDMA_PAGESIZE - 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 222)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 223) for (i = 0; i < VDMA_PGTBL_ENTRIES; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 224) if (pgtbl[i].frame == frame)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 225) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 226) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 227)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 228) if (i == VDMA_PGTBL_ENTRIES)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 229) return ~0UL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 230)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 231) return (i << 12) + (paddr & (VDMA_PAGESIZE - 1));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 232) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 233)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 234) EXPORT_SYMBOL(vdma_phys2log);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 235)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 236) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 237) * Translate a logical DMA address to a physical address
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 238) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 239) unsigned long vdma_log2phys(unsigned long laddr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 240) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 241) return pgtbl[laddr >> 12].frame + (laddr & (VDMA_PAGESIZE - 1));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 242) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 243)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 244) EXPORT_SYMBOL(vdma_log2phys);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 245)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 246) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 247) * Print DMA statistics
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 248) */
/*
 * Print DMA statistics: dumps the R4030 configuration, translation
 * table, fault and interrupt registers, then the per-channel mode
 * and enable registers (channel i's registers sit at stride i << 5).
 */
void vdma_stats(void)
{
	int i;

	printk("vdma_stats: CONFIG: %08x\n",
	       r4030_read_reg32(JAZZ_R4030_CONFIG));
	printk("R4030 translation table base: %08x\n",
	       r4030_read_reg32(JAZZ_R4030_TRSTBL_BASE));
	printk("R4030 translation table limit: %08x\n",
	       r4030_read_reg32(JAZZ_R4030_TRSTBL_LIM));
	printk("vdma_stats: INV_ADDR: %08x\n",
	       r4030_read_reg32(JAZZ_R4030_INV_ADDR));
	printk("vdma_stats: R_FAIL_ADDR: %08x\n",
	       r4030_read_reg32(JAZZ_R4030_R_FAIL_ADDR));
	printk("vdma_stats: M_FAIL_ADDR: %08x\n",
	       r4030_read_reg32(JAZZ_R4030_M_FAIL_ADDR));
	printk("vdma_stats: IRQ_SOURCE: %08x\n",
	       r4030_read_reg32(JAZZ_R4030_IRQ_SOURCE));
	printk("vdma_stats: I386_ERROR: %08x\n",
	       r4030_read_reg32(JAZZ_R4030_I386_ERROR));
	printk("vdma_chnl_modes: ");
	for (i = 0; i < 8; i++)
		printk("%04x ",
		       (unsigned) r4030_read_reg32(JAZZ_R4030_CHNL_MODE +
						   (i << 5)));
	printk("\n");
	printk("vdma_chnl_enables: ");
	for (i = 0; i < 8; i++)
		printk("%04x ",
		       (unsigned) r4030_read_reg32(JAZZ_R4030_CHNL_ENABLE +
						   (i << 5)));
	printk("\n");
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 282)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 283) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 284) * DMA transfer functions
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 285) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 286)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 287) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 288) * Enable a DMA channel. Also clear any error conditions.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 289) */
/*
 * Enable a DMA channel. Also clear any error conditions.
 *
 * Reports (but does not clear via this read) address/memory error
 * bits, acknowledges all pending interrupt flags, then sets the
 * channel enable bit.
 */
void vdma_enable(int channel)
{
	int status;

	if (vdma_debug)
		printk("vdma_enable: channel %d\n", channel);

	/*
	 * Check error conditions first
	 */
	status = r4030_read_reg32(JAZZ_R4030_CHNL_ENABLE + (channel << 5));
	if (status & 0x400)	/* address error bit */
		printk("VDMA: Channel %d: Address error!\n", channel);
	if (status & 0x200)	/* memory error bit */
		printk("VDMA: Channel %d: Memory error!\n", channel);

	/*
	 * Clear all interrupt flags
	 */
	r4030_write_reg32(JAZZ_R4030_CHNL_ENABLE + (channel << 5),
			  r4030_read_reg32(JAZZ_R4030_CHNL_ENABLE +
					   (channel << 5)) | R4030_TC_INTR
			  | R4030_MEM_INTR | R4030_ADDR_INTR);

	/*
	 * Enable the desired channel
	 */
	r4030_write_reg32(JAZZ_R4030_CHNL_ENABLE + (channel << 5),
			  r4030_read_reg32(JAZZ_R4030_CHNL_ENABLE +
					   (channel << 5)) |
			  R4030_CHNL_ENABLE);
}

EXPORT_SYMBOL(vdma_enable);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 324)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 325) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 326) * Disable a DMA channel
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 327) */
/*
 * Disable a DMA channel: clear the channel's enable bit, then perform
 * a dummy remote-bus read to flush any in-flight DMA acknowledge cycle.
 */
void vdma_disable(int channel)
{
	if (vdma_debug) {
		int status =
		    r4030_read_reg32(JAZZ_R4030_CHNL_ENABLE +
				     (channel << 5));

		printk("vdma_disable: channel %d\n", channel);
		printk("VDMA: channel %d status: %04x (%s) mode: "
		       "%02x addr: %06x count: %06x\n",
		       channel, status,
		       ((status & 0x600) ? "ERROR" : "OK"),
		       (unsigned) r4030_read_reg32(JAZZ_R4030_CHNL_MODE +
						   (channel << 5)),
		       (unsigned) r4030_read_reg32(JAZZ_R4030_CHNL_ADDR +
						   (channel << 5)),
		       (unsigned) r4030_read_reg32(JAZZ_R4030_CHNL_COUNT +
						   (channel << 5)));
	}

	r4030_write_reg32(JAZZ_R4030_CHNL_ENABLE + (channel << 5),
			  r4030_read_reg32(JAZZ_R4030_CHNL_ENABLE +
					   (channel << 5)) &
			  ~R4030_CHNL_ENABLE);

	/*
	 * After disabling a DMA channel a remote bus register should be
	 * read to ensure that the current DMA acknowledge cycle is completed.
	 * The value is intentionally discarded; the volatile access is the
	 * point.
	 */
	*((volatile unsigned int *) JAZZ_DUMMY_DEVICE);
}

EXPORT_SYMBOL(vdma_disable);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 361)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 362) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 363) * Set DMA mode. This function accepts the mode values used
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 364) * to set a PC-style DMA controller. For the SCSI and FDC
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 365) * channels, we also set the default modes each time we're
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 366) * called.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 367) * NOTE: The FAST and BURST dma modes are supported by the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 368) * R4030 Rev. 2 and PICA chipsets only. I leave them disabled
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 369) * for now.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 370) */
/*
 * Set DMA mode. This function accepts the mode values used
 * to set a PC-style DMA controller. For the SCSI and FDC
 * channels, we also set the default modes each time we're
 * called.
 * NOTE: The FAST and BURST dma modes are supported by the
 * R4030 Rev. 2 and PICA chipsets only. I leave them disabled
 * for now.
 *
 * NOTE(review): an unsupported channel only logs an error but still
 * falls through to the direction-bit update below — confirm that is
 * intentional before relying on it.
 */
void vdma_set_mode(int channel, int mode)
{
	if (vdma_debug)
		printk("vdma_set_mode: channel %d, mode 0x%x\n", channel,
		       mode);

	/* Per-channel defaults: interrupt enable, bus width, access time. */
	switch (channel) {
	case JAZZ_SCSI_DMA:	/* scsi */
		r4030_write_reg32(JAZZ_R4030_CHNL_MODE + (channel << 5),
/* R4030_MODE_FAST | */
/* R4030_MODE_BURST | */
				  R4030_MODE_INTR_EN |
				  R4030_MODE_WIDTH_16 |
				  R4030_MODE_ATIME_80);
		break;

	case JAZZ_FLOPPY_DMA:	/* floppy */
		r4030_write_reg32(JAZZ_R4030_CHNL_MODE + (channel << 5),
/* R4030_MODE_FAST | */
/* R4030_MODE_BURST | */
				  R4030_MODE_INTR_EN |
				  R4030_MODE_WIDTH_8 |
				  R4030_MODE_ATIME_120);
		break;

	case JAZZ_AUDIOL_DMA:
	case JAZZ_AUDIOR_DMA:
		printk("VDMA: Audio DMA not supported yet.\n");
		break;

	default:
		printk
		    ("VDMA: vdma_set_mode() called with unsupported channel %d!\n",
		     channel);
	}

	/* Map the PC-style mode onto the R4030 direction (WRITE) bit. */
	switch (mode) {
	case DMA_MODE_READ:
		r4030_write_reg32(JAZZ_R4030_CHNL_ENABLE + (channel << 5),
				  r4030_read_reg32(JAZZ_R4030_CHNL_ENABLE +
						   (channel << 5)) &
				  ~R4030_CHNL_WRITE);
		break;

	case DMA_MODE_WRITE:
		r4030_write_reg32(JAZZ_R4030_CHNL_ENABLE + (channel << 5),
				  r4030_read_reg32(JAZZ_R4030_CHNL_ENABLE +
						   (channel << 5)) |
				  R4030_CHNL_WRITE);
		break;

	default:
		printk
		    ("VDMA: vdma_set_mode() called with unknown dma mode 0x%x\n",
		     mode);
	}
}

EXPORT_SYMBOL(vdma_set_mode);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 430)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 431) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 432) * Set Transfer Address
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 433) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 434) void vdma_set_addr(int channel, long addr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 435) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 436) if (vdma_debug)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 437) printk("vdma_set_addr: channel %d, addr %lx\n", channel,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 438) addr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 439)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 440) r4030_write_reg32(JAZZ_R4030_CHNL_ADDR + (channel << 5), addr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 441) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 442)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 443) EXPORT_SYMBOL(vdma_set_addr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 444)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 445) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 446) * Set Transfer Count
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 447) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 448) void vdma_set_count(int channel, int count)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 449) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 450) if (vdma_debug)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 451) printk("vdma_set_count: channel %d, count %08x\n", channel,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 452) (unsigned) count);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 453)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 454) r4030_write_reg32(JAZZ_R4030_CHNL_COUNT + (channel << 5), count);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 455) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 456)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 457) EXPORT_SYMBOL(vdma_set_count);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 458)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 459) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 460) * Get Residual
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 461) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 462) int vdma_get_residue(int channel)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 463) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 464) int residual;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 465)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 466) residual = r4030_read_reg32(JAZZ_R4030_CHNL_COUNT + (channel << 5));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 467)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 468) if (vdma_debug)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 469) printk("vdma_get_residual: channel %d: residual=%d\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 470) channel, residual);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 471)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 472) return residual;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 473) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 474)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 475) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 476) * Get DMA channel enable register
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 477) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 478) int vdma_get_enable(int channel)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 479) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 480) int enable;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 481)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 482) enable = r4030_read_reg32(JAZZ_R4030_CHNL_ENABLE + (channel << 5));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 483)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 484) if (vdma_debug)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 485) printk("vdma_get_enable: channel %d: enable=%d\n", channel,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 486) enable);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 487)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 488) return enable;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 489) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 490)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 491) static void *jazz_dma_alloc(struct device *dev, size_t size,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 492) dma_addr_t *dma_handle, gfp_t gfp, unsigned long attrs)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 493) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 494) struct page *page;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 495) void *ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 496)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 497) if (attrs & DMA_ATTR_NO_WARN)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 498) gfp |= __GFP_NOWARN;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 499)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 500) size = PAGE_ALIGN(size);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 501) page = alloc_pages(gfp, get_order(size));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 502) if (!page)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 503) return NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 504) ret = page_address(page);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 505) memset(ret, 0, size);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 506) *dma_handle = vdma_alloc(virt_to_phys(ret), size);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 507) if (*dma_handle == DMA_MAPPING_ERROR)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 508) goto out_free_pages;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 509) arch_dma_prep_coherent(page, size);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 510) return (void *)(UNCAC_BASE + __pa(ret));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 511)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 512) out_free_pages:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 513) __free_pages(page, get_order(size));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 514) return NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 515) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 516)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 517) static void jazz_dma_free(struct device *dev, size_t size, void *vaddr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 518) dma_addr_t dma_handle, unsigned long attrs)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 519) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 520) vdma_free(dma_handle);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 521) __free_pages(virt_to_page(vaddr), get_order(size));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 522) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 523)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 524) static dma_addr_t jazz_dma_map_page(struct device *dev, struct page *page,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 525) unsigned long offset, size_t size, enum dma_data_direction dir,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 526) unsigned long attrs)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 527) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 528) phys_addr_t phys = page_to_phys(page) + offset;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 529)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 530) if (!(attrs & DMA_ATTR_SKIP_CPU_SYNC))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 531) arch_sync_dma_for_device(phys, size, dir);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 532) return vdma_alloc(phys, size);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 533) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 534)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 535) static void jazz_dma_unmap_page(struct device *dev, dma_addr_t dma_addr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 536) size_t size, enum dma_data_direction dir, unsigned long attrs)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 537) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 538) if (!(attrs & DMA_ATTR_SKIP_CPU_SYNC))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 539) arch_sync_dma_for_cpu(vdma_log2phys(dma_addr), size, dir);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 540) vdma_free(dma_addr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 541) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 542)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 543) static int jazz_dma_map_sg(struct device *dev, struct scatterlist *sglist,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 544) int nents, enum dma_data_direction dir, unsigned long attrs)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 545) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 546) int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 547) struct scatterlist *sg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 548)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 549) for_each_sg(sglist, sg, nents, i) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 550) if (!(attrs & DMA_ATTR_SKIP_CPU_SYNC))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 551) arch_sync_dma_for_device(sg_phys(sg), sg->length,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 552) dir);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 553) sg->dma_address = vdma_alloc(sg_phys(sg), sg->length);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 554) if (sg->dma_address == DMA_MAPPING_ERROR)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 555) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 556) sg_dma_len(sg) = sg->length;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 557) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 558)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 559) return nents;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 560) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 561)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 562) static void jazz_dma_unmap_sg(struct device *dev, struct scatterlist *sglist,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 563) int nents, enum dma_data_direction dir, unsigned long attrs)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 564) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 565) int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 566) struct scatterlist *sg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 567)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 568) for_each_sg(sglist, sg, nents, i) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 569) if (!(attrs & DMA_ATTR_SKIP_CPU_SYNC))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 570) arch_sync_dma_for_cpu(sg_phys(sg), sg->length, dir);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 571) vdma_free(sg->dma_address);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 572) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 573) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 574)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 575) static void jazz_dma_sync_single_for_device(struct device *dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 576) dma_addr_t addr, size_t size, enum dma_data_direction dir)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 577) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 578) arch_sync_dma_for_device(vdma_log2phys(addr), size, dir);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 579) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 580)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 581) static void jazz_dma_sync_single_for_cpu(struct device *dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 582) dma_addr_t addr, size_t size, enum dma_data_direction dir)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 583) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 584) arch_sync_dma_for_cpu(vdma_log2phys(addr), size, dir);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 585) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 586)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 587) static void jazz_dma_sync_sg_for_device(struct device *dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 588) struct scatterlist *sgl, int nents, enum dma_data_direction dir)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 589) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 590) struct scatterlist *sg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 591) int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 592)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 593) for_each_sg(sgl, sg, nents, i)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 594) arch_sync_dma_for_device(sg_phys(sg), sg->length, dir);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 595) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 596)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 597) static void jazz_dma_sync_sg_for_cpu(struct device *dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 598) struct scatterlist *sgl, int nents, enum dma_data_direction dir)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 599) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 600) struct scatterlist *sg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 601) int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 602)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 603) for_each_sg(sgl, sg, nents, i)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 604) arch_sync_dma_for_cpu(sg_phys(sg), sg->length, dir);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 605) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 606)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 607) const struct dma_map_ops jazz_dma_ops = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 608) .alloc = jazz_dma_alloc,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 609) .free = jazz_dma_free,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 610) .map_page = jazz_dma_map_page,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 611) .unmap_page = jazz_dma_unmap_page,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 612) .map_sg = jazz_dma_map_sg,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 613) .unmap_sg = jazz_dma_unmap_sg,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 614) .sync_single_for_cpu = jazz_dma_sync_single_for_cpu,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 615) .sync_single_for_device = jazz_dma_sync_single_for_device,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 616) .sync_sg_for_cpu = jazz_dma_sync_sg_for_cpu,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 617) .sync_sg_for_device = jazz_dma_sync_sg_for_device,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 618) .mmap = dma_common_mmap,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 619) .get_sgtable = dma_common_get_sgtable,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 620) .alloc_pages = dma_common_alloc_pages,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 621) .free_pages = dma_common_free_pages,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 622) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 623) EXPORT_SYMBOL(jazz_dma_ops);