Orange Pi5 kernel

Deprecated Linux kernel 5.10.110 for OrangePi 5/5B/5+ boards

3 Commits   0 Branches   0 Tags
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   1) // SPDX-License-Identifier: GPL-2.0
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   2) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   3)  * arch/sh/drivers/dma/dma-sh.c
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   4)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   5)  * SuperH On-chip DMAC Support
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   6)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   7)  * Copyright (C) 2000 Takashi YOSHII
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   8)  * Copyright (C) 2003, 2004 Paul Mundt
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   9)  * Copyright (C) 2005 Andriy Skulysh
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  10)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  11) #include <linux/init.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  12) #include <linux/interrupt.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  13) #include <linux/module.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  14) #include <linux/io.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  15) #include <mach-dreamcast/mach/dma.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  16) #include <asm/dma.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  17) #include <asm/dma-register.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  18) #include <cpu/dma-register.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  19) #include <cpu/dma.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  20) 
/*
 * Define the default configuration for dual address memory-memory transfer.
 * The 0x400 value represents auto-request, external->external.
 *
 * Used by sh_dmac_configure_channel() when no explicit CHCR value is
 * supplied; CHCR_IE is OR'd in separately at that call site.
 */
#define RS_DUAL	(DM_INC | SM_INC | RS_AUTO | TS_INDEX2VAL(XMIT_SZ_32BIT))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  26) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  27) static unsigned long dma_find_base(unsigned int chan)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  28) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  29) 	unsigned long base = SH_DMAC_BASE0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  30) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  31) #ifdef SH_DMAC_BASE1
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  32) 	if (chan >= 6)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  33) 		base = SH_DMAC_BASE1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  34) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  35) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  36) 	return base;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  37) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  38) 
/*
 * dma_base_addr - compute the per-channel register base address.
 *
 * Each channel owns a 0x10-byte register window.  Channels 4 and
 * above sit past an extra 0x10-byte hole in the register map, and
 * channels 9-11 are renumbered down by 6 first.
 * NOTE(review): channels 6-8 are *not* renumbered here — confirm
 * against the part's register map that this asymmetry is intended.
 */
static unsigned long dma_base_addr(unsigned int chan)
{
	unsigned long addr = dma_find_base(chan);

	/* Normalize offset calculation */
	if (chan >= 9)
		chan -= 6;

	/* Skip the hole between the channel 3 and channel 4 windows. */
	if (chan >= 4)
		addr += 0x10;

	return addr + chan * 0x10;
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  51) 
#ifdef CONFIG_SH_DMA_IRQ_MULTI
/*
 * Multiplexed IRQs: every channel in a group shares one line.
 * Channels 0-5 raise DMTE0_IRQ, channels 6-11 raise DMTE6_IRQ.
 */
static inline unsigned int get_dmte_irq(unsigned int chan)
{
	return chan >= 6 ? DMTE6_IRQ : DMTE0_IRQ;
}
#else

/*
 * One dedicated transfer-end IRQ per channel, indexed by channel
 * number.  Entries past channel 3 only exist on parts that define
 * the corresponding DMTEx_IRQ macros.
 */
static unsigned int dmte_irq_map[] = {
	DMTE0_IRQ, DMTE0_IRQ + 1, DMTE0_IRQ + 2, DMTE0_IRQ + 3,

#ifdef DMTE4_IRQ
	DMTE4_IRQ, DMTE4_IRQ + 1,
#endif

#ifdef DMTE6_IRQ
	DMTE6_IRQ, DMTE6_IRQ + 1,
#endif

#ifdef DMTE8_IRQ
	DMTE8_IRQ, DMTE9_IRQ, DMTE10_IRQ, DMTE11_IRQ,
#endif
};

/* Look up the transfer-end IRQ for a channel. */
static inline unsigned int get_dmte_irq(unsigned int chan)
{
	return dmte_irq_map[chan];
}
#endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  80) 
/*
 * We determine the correct shift size based off of the CHCR transmit size
 * for the given channel. Since we know that it will take:
 *
 *	info->count >> ts_shift[transmit_size]
 *
 * iterations to complete the transfer.
 *
 * The table is indexed by the recombined CHCR transmit-size field
 * (see calc_xmit_shift()); TS_SHIFT is provided by <cpu/dma-register.h>.
 */
static unsigned int ts_shift[] = TS_SHIFT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  90) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  91) static inline unsigned int calc_xmit_shift(struct dma_channel *chan)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  92) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  93) 	u32 chcr = __raw_readl(dma_base_addr(chan->chan) + CHCR);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  94) 	int cnt = ((chcr & CHCR_TS_LOW_MASK) >> CHCR_TS_LOW_SHIFT) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  95) 		((chcr & CHCR_TS_HIGH_MASK) >> CHCR_TS_HIGH_SHIFT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  96) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  97) 	return ts_shift[cnt];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  98) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  99) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 100) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 101)  * The transfer end interrupt must read the chcr register to end the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 102)  * hardware interrupt active condition.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 103)  * Besides that it needs to waken any waiting process, which should handle
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 104)  * setting up the next transfer.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 105)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 106) static irqreturn_t dma_tei(int irq, void *dev_id)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 107) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 108) 	struct dma_channel *chan = dev_id;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 109) 	u32 chcr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 110) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 111) 	chcr = __raw_readl(dma_base_addr(chan->chan) + CHCR);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 112) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 113) 	if (!(chcr & CHCR_TE))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 114) 		return IRQ_NONE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 115) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 116) 	chcr &= ~(CHCR_IE | CHCR_DE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 117) 	__raw_writel(chcr, (dma_base_addr(chan->chan) + CHCR));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 118) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 119) 	wake_up(&chan->wait_queue);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 120) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 121) 	return IRQ_HANDLED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 122) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 123) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 124) static int sh_dmac_request_dma(struct dma_channel *chan)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 125) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 126) 	if (unlikely(!(chan->flags & DMA_TEI_CAPABLE)))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 127) 		return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 128) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 129) 	return request_irq(get_dmte_irq(chan->chan), dma_tei, IRQF_SHARED,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 130) 			   chan->dev_id, chan);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 131) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 132) 
/*
 * Release the transfer-end IRQ acquired by sh_dmac_request_dma().
 * NOTE(review): called unconditionally while the IRQ is only requested
 * for DMA_TEI_CAPABLE channels — confirm freeing a never-requested
 * line is benign for all callers.
 */
static void sh_dmac_free_dma(struct dma_channel *chan)
{
	free_irq(get_dmte_irq(chan->chan), chan);
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 137) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 138) static int
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 139) sh_dmac_configure_channel(struct dma_channel *chan, unsigned long chcr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 140) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 141) 	if (!chcr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 142) 		chcr = RS_DUAL | CHCR_IE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 143) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 144) 	if (chcr & CHCR_IE) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 145) 		chcr &= ~CHCR_IE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 146) 		chan->flags |= DMA_TEI_CAPABLE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 147) 	} else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 148) 		chan->flags &= ~DMA_TEI_CAPABLE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 149) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 150) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 151) 	__raw_writel(chcr, (dma_base_addr(chan->chan) + CHCR));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 152) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 153) 	chan->flags |= DMA_CONFIGURED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 154) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 155) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 156) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 157) static void sh_dmac_enable_dma(struct dma_channel *chan)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 158) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 159) 	int irq;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 160) 	u32 chcr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 161) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 162) 	chcr = __raw_readl(dma_base_addr(chan->chan) + CHCR);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 163) 	chcr |= CHCR_DE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 164) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 165) 	if (chan->flags & DMA_TEI_CAPABLE)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 166) 		chcr |= CHCR_IE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 167) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 168) 	__raw_writel(chcr, (dma_base_addr(chan->chan) + CHCR));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 169) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 170) 	if (chan->flags & DMA_TEI_CAPABLE) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 171) 		irq = get_dmte_irq(chan->chan);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 172) 		enable_irq(irq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 173) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 174) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 175) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 176) static void sh_dmac_disable_dma(struct dma_channel *chan)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 177) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 178) 	int irq;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 179) 	u32 chcr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 180) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 181) 	if (chan->flags & DMA_TEI_CAPABLE) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 182) 		irq = get_dmte_irq(chan->chan);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 183) 		disable_irq(irq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 184) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 185) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 186) 	chcr = __raw_readl(dma_base_addr(chan->chan) + CHCR);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 187) 	chcr &= ~(CHCR_DE | CHCR_TE | CHCR_IE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 188) 	__raw_writel(chcr, (dma_base_addr(chan->chan) + CHCR));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 189) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 190) 
/*
 * sh_dmac_xfer_dma - program and start a transfer on a channel.
 *
 * Applies the default configuration if the caller never configured the
 * channel, stops any transfer in flight, programs SAR/DAR/TCR from the
 * channel state, and re-enables the channel.  Always returns 0.
 */
static int sh_dmac_xfer_dma(struct dma_channel *chan)
{
	/*
	 * If we haven't pre-configured the channel with special flags, use
	 * the defaults.
	 */
	if (unlikely(!(chan->flags & DMA_CONFIGURED)))
		sh_dmac_configure_channel(chan, 0);

	sh_dmac_disable_dma(chan);

	/*
	 * Single-address mode usage note!
	 *
	 * It's important that we don't accidentally write any value to SAR/DAR
	 * (this includes 0) that hasn't been directly specified by the user if
	 * we're in single-address mode.
	 *
	 * In this case, only one address can be defined, anything else will
	 * result in a DMA address error interrupt (at least on the SH-4),
	 * which will subsequently halt the transfer.
	 *
	 * Channel 2 on the Dreamcast is a special case, as this is used for
	 * cascading to the PVR2 DMAC. In this case, we still need to write
	 * SAR and DAR, regardless of value, in order for cascading to work.
	 */
	if (chan->sar || (mach_is_dreamcast() &&
			  chan->chan == PVR2_CASCADE_CHAN))
		__raw_writel(chan->sar, (dma_base_addr(chan->chan) + SAR));
	if (chan->dar || (mach_is_dreamcast() &&
			  chan->chan == PVR2_CASCADE_CHAN))
		__raw_writel(chan->dar, (dma_base_addr(chan->chan) + DAR));

	/* TCR counts in transfer-size units, not bytes. */
	__raw_writel(chan->count >> calc_xmit_shift(chan),
		(dma_base_addr(chan->chan) + TCR));

	sh_dmac_enable_dma(chan);

	return 0;
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 231) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 232) static int sh_dmac_get_dma_residue(struct dma_channel *chan)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 233) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 234) 	if (!(__raw_readl(dma_base_addr(chan->chan) + CHCR) & CHCR_DE))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 235) 		return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 236) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 237) 	return __raw_readl(dma_base_addr(chan->chan) + TCR)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 238) 		 << calc_xmit_shift(chan);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 239) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 240) 
/*
 * DMAOR handling
 */
#if defined(CONFIG_CPU_SUBTYPE_SH7723)	|| \
    defined(CONFIG_CPU_SUBTYPE_SH7724)	|| \
    defined(CONFIG_CPU_SUBTYPE_SH7780)	|| \
    defined(CONFIG_CPU_SUBTYPE_SH7785)
#define NR_DMAOR	2
#else
#define NR_DMAOR	1
#endif

/*
 * DMAOR bases are broken out amongst channel groups. DMAOR0 manages
 * channels 0 - 5, DMAOR1 6 - 11 (optional).
 *
 * DMAOR sits at the start of each controller's register block, so map
 * controller number n to a channel owned by that controller (n * 6)
 * and look the base up from there.  The multiplication must happen
 * inside the dma_find_base() argument: dma_find_base(n) * 6 would
 * scale the returned base address itself and make the write land on
 * an unrelated address.
 */
#define dmaor_read_reg(n)		__raw_readw(dma_find_base((n) * 6))
#define dmaor_write_reg(n, data)	__raw_writew(data, dma_find_base((n) * 6))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 259) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 260) static inline int dmaor_reset(int no)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 261) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 262) 	unsigned long dmaor = dmaor_read_reg(no);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 263) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 264) 	/* Try to clear the error flags first, incase they are set */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 265) 	dmaor &= ~(DMAOR_NMIF | DMAOR_AE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 266) 	dmaor_write_reg(no, dmaor);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 267) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 268) 	dmaor |= DMAOR_INIT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 269) 	dmaor_write_reg(no, dmaor);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 270) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 271) 	/* See if we got an error again */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 272) 	if ((dmaor_read_reg(no) & (DMAOR_AE | DMAOR_NMIF))) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 273) 		printk(KERN_ERR "dma-sh: Can't initialize DMAOR.\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 274) 		return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 275) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 276) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 277) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 278) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 279) 
/*
 * DMAE handling
 *
 * DMAE is the DMAC address-error interrupt; only SH-4 parts wire it
 * up here (see the stub variants below for everything else).
 */
#ifdef CONFIG_CPU_SH4

/* One error IRQ per controller; a second controller defines DMAE1_IRQ. */
#if defined(DMAE1_IRQ)
#define NR_DMAE		2
#else
#define NR_DMAE		1
#endif

/* Handler names; also passed as the dev_id cookie to request_irq(). */
static const char *dmae_name[] = {
	"DMAC Address Error0",
	"DMAC Address Error1"
};

#ifdef CONFIG_SH_DMA_IRQ_MULTI
/*
 * Multiplexed IRQs: derive the error line from the controller's
 * first channel group (channel n * 6).
 */
static inline unsigned int get_dma_error_irq(int n)
{
	return get_dmte_irq(n * 6);
}
#else

/* Dedicated address-error IRQ per controller. */
static unsigned int dmae_irq_map[] = {
	DMAE0_IRQ,

#ifdef DMAE1_IRQ
	DMAE1_IRQ,
#endif
};

/* Look up the address-error IRQ for controller n. */
static inline unsigned int get_dma_error_irq(int n)
{
	return dmae_irq_map[n];
}
#endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 316) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 317) static irqreturn_t dma_err(int irq, void *dummy)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 318) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 319) 	int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 320) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 321) 	for (i = 0; i < NR_DMAOR; i++)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 322) 		dmaor_reset(i);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 323) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 324) 	disable_irq(irq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 325) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 326) 	return IRQ_HANDLED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 327) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 328) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 329) static int dmae_irq_init(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 330) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 331) 	int n;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 332) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 333) 	for (n = 0; n < NR_DMAE; n++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 334) 		int i = request_irq(get_dma_error_irq(n), dma_err,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 335) 				    IRQF_SHARED, dmae_name[n], (void *)dmae_name[n]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 336) 		if (unlikely(i < 0)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 337) 			printk(KERN_ERR "%s request_irq fail\n", dmae_name[n]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 338) 			return i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 339) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 340) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 341) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 342) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 343) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 344) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 345) static void dmae_irq_free(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 346) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 347) 	int n;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 348) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 349) 	for (n = 0; n < NR_DMAE; n++)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 350) 		free_irq(get_dma_error_irq(n), NULL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 351) }
#else
/* No DMAE support on this CPU: provide no-op stubs. */
static inline int dmae_irq_init(void)
{
	return 0;
}

static void dmae_irq_free(void)
{
}
#endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 362) 
/* Channel operations handed to the SH DMA core via register_dmac(). */
static struct dma_ops sh_dmac_ops = {
	.request	= sh_dmac_request_dma,
	.free		= sh_dmac_free_dma,
	.get_residue	= sh_dmac_get_dma_residue,
	.xfer		= sh_dmac_xfer_dma,
	.configure	= sh_dmac_configure_channel,
};
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 370) 
/* Controller description registered with the SH DMA core. */
static struct dma_info sh_dmac_info = {
	.name		= "sh_dmac",
	.nr_channels	= CONFIG_NR_ONCHIP_DMA_CHANNELS,
	.ops		= &sh_dmac_ops,
	.flags		= DMAC_CHANNELS_TEI_CAPABLE,
};
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 377) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 378) static int __init sh_dmac_init(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 379) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 380) 	struct dma_info *info = &sh_dmac_info;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 381) 	int i, rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 382) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 383) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 384) 	 * Initialize DMAE, for parts that support it.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 385) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 386) 	rc = dmae_irq_init();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 387) 	if (unlikely(rc != 0))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 388) 		return rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 389) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 390) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 391) 	 * Initialize DMAOR, and clean up any error flags that may have
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 392) 	 * been set.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 393) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 394) 	for (i = 0; i < NR_DMAOR; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 395) 		rc = dmaor_reset(i);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 396) 		if (unlikely(rc != 0))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 397) 			return rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 398) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 399) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 400) 	return register_dmac(info);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 401) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 402) 
/* Tear down in reverse: drop the DMAE IRQs, then unregister the DMAC. */
static void __exit sh_dmac_exit(void)
{
	dmae_irq_free();
	unregister_dmac(&sh_dmac_info);
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 408) 
/* Initialize early (subsys level) so other drivers can claim channels. */
subsys_initcall(sh_dmac_init);
module_exit(sh_dmac_exit);

MODULE_AUTHOR("Takashi YOSHII, Paul Mundt, Andriy Skulysh");
MODULE_DESCRIPTION("SuperH On-Chip DMAC Support");
MODULE_LICENSE("GPL v2");