Orange Pi5 kernel

Deprecated Linux kernel 5.10.110 for OrangePi 5/5B/5+ boards

3 Commits   0 Branches   0 Tags
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   1) // SPDX-License-Identifier: GPL-2.0+
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   2) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   3)  * Renesas SuperH DMA Engine support
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   4)  *
 * base is drivers/dma/fsldma.c
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   6)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   7)  * Copyright (C) 2011-2012 Guennadi Liakhovetski <g.liakhovetski@gmx.de>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   8)  * Copyright (C) 2009 Nobuhiro Iwamatsu <iwamatsu.nobuhiro@renesas.com>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   9)  * Copyright (C) 2009 Renesas Solutions, Inc. All rights reserved.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  10)  * Copyright (C) 2007 Freescale Semiconductor, Inc. All rights reserved.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  11)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  12)  * - DMA of SuperH does not have Hardware DMA chain mode.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  13)  * - MAX DMA size is 16MB.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  14)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  15)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  16) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  17) #include <linux/delay.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  18) #include <linux/dmaengine.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  19) #include <linux/err.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  20) #include <linux/init.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  21) #include <linux/interrupt.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  22) #include <linux/kdebug.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  23) #include <linux/module.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  24) #include <linux/notifier.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  25) #include <linux/of.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  26) #include <linux/of_device.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  27) #include <linux/platform_device.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  28) #include <linux/pm_runtime.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  29) #include <linux/rculist.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  30) #include <linux/sh_dma.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  31) #include <linux/slab.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  32) #include <linux/spinlock.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  33) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  34) #include "../dmaengine.h"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  35) #include "shdma.h"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  36) 
/* DMA registers (offsets from a channel's register base) */
#define SAR	0x00	/* Source Address Register */
#define DAR	0x04	/* Destination Address Register */
#define TCR	0x08	/* Transfer Count Register */
#define CHCR	0x0C	/* Channel Control Register */
#define DMAOR	0x40	/* DMA Operation Register */

#define TEND	0x18 /* USB-DMAC */

#define SH_DMAE_DRV_NAME "sh-dma-engine"

/* Default MEMCPY transfer size = 2^2 = 4 bytes */
#define LOG2_DEFAULT_XFER_SIZE	2
/* Highest slave ID accepted by the non-DT lookup path (exclusive bound) */
#define SH_DMA_SLAVE_NUMBER 256
/* Maximum transfer count: MAX DMA size is 16MB (see header comment) */
#define SH_DMA_TCR_MAX (16 * 1024 * 1024 - 1)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  52) 
/*
 * Used for write-side mutual exclusion for the global device list,
 * read-side synchronization by way of RCU, and per-controller data.
 */
static DEFINE_SPINLOCK(sh_dmae_lock);
/* Global list of registered controllers; locking rules described above */
static LIST_HEAD(sh_dmae_devices);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  59) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  60) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  61)  * Different DMAC implementations provide different ways to clear DMA channels:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  62)  * (1) none - no CHCLR registers are available
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  63)  * (2) one CHCLR register per channel - 0 has to be written to it to clear
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  64)  *     channel buffers
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  65)  * (3) one CHCLR per several channels - 1 has to be written to the bit,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  66)  *     corresponding to the specific channel to reset it
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  67)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  68) static void channel_clear(struct sh_dmae_chan *sh_dc)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  69) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  70) 	struct sh_dmae_device *shdev = to_sh_dev(sh_dc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  71) 	const struct sh_dmae_channel *chan_pdata = shdev->pdata->channel +
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  72) 		sh_dc->shdma_chan.id;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  73) 	u32 val = shdev->pdata->chclr_bitwise ? 1 << chan_pdata->chclr_bit : 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  74) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  75) 	__raw_writel(val, shdev->chan_reg + chan_pdata->chclr_offset);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  76) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  77) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  78) static void sh_dmae_writel(struct sh_dmae_chan *sh_dc, u32 data, u32 reg)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  79) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  80) 	__raw_writel(data, sh_dc->base + reg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  81) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  82) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  83) static u32 sh_dmae_readl(struct sh_dmae_chan *sh_dc, u32 reg)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  84) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  85) 	return __raw_readl(sh_dc->base + reg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  86) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  87) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  88) static u16 dmaor_read(struct sh_dmae_device *shdev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  89) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  90) 	void __iomem *addr = shdev->chan_reg + DMAOR;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  91) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  92) 	if (shdev->pdata->dmaor_is_32bit)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  93) 		return __raw_readl(addr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  94) 	else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  95) 		return __raw_readw(addr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  96) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  97) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  98) static void dmaor_write(struct sh_dmae_device *shdev, u16 data)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  99) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 100) 	void __iomem *addr = shdev->chan_reg + DMAOR;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 101) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 102) 	if (shdev->pdata->dmaor_is_32bit)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 103) 		__raw_writel(data, addr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 104) 	else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 105) 		__raw_writew(data, addr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 106) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 107) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 108) static void chcr_write(struct sh_dmae_chan *sh_dc, u32 data)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 109) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 110) 	struct sh_dmae_device *shdev = to_sh_dev(sh_dc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 111) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 112) 	__raw_writel(data, sh_dc->base + shdev->chcr_offset);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 113) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 114) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 115) static u32 chcr_read(struct sh_dmae_chan *sh_dc)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 116) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 117) 	struct sh_dmae_device *shdev = to_sh_dev(sh_dc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 118) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 119) 	return __raw_readl(sh_dc->base + shdev->chcr_offset);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 120) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 121) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 122) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 123)  * Reset DMA controller
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 124)  *
 * SH7780 has two DMAOR registers
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 126)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 127) static void sh_dmae_ctl_stop(struct sh_dmae_device *shdev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 128) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 129) 	unsigned short dmaor;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 130) 	unsigned long flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 131) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 132) 	spin_lock_irqsave(&sh_dmae_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 133) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 134) 	dmaor = dmaor_read(shdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 135) 	dmaor_write(shdev, dmaor & ~(DMAOR_NMIF | DMAOR_AE | DMAOR_DME));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 136) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 137) 	spin_unlock_irqrestore(&sh_dmae_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 138) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 139) 
/*
 * Re-initialise the controller: clear the error/NMI/enable flags,
 * optionally reset every channel through CHCLR, then program the
 * platform's initial DMAOR value and read it back to verify it stuck.
 *
 * Returns 0 on success, -EIO if DMAOR still reports AE or NMIF.
 */
static int sh_dmae_rst(struct sh_dmae_device *shdev)
{
	unsigned short dmaor;
	unsigned long flags;

	spin_lock_irqsave(&sh_dmae_lock, flags);

	/* Keep the current DMAOR contents minus error / NMI / enable bits */
	dmaor = dmaor_read(shdev) & ~(DMAOR_NMIF | DMAOR_AE | DMAOR_DME);

	if (shdev->pdata->chclr_present) {
		int i;
		for (i = 0; i < shdev->pdata->channel_num; i++) {
			struct sh_dmae_chan *sh_chan = shdev->chan[i];
			/* Channels may not all be instantiated yet */
			if (sh_chan)
				channel_clear(sh_chan);
		}
	}

	dmaor_write(shdev, dmaor | shdev->pdata->dmaor_init);

	/* Read back to check whether the hardware latched the value */
	dmaor = dmaor_read(shdev);

	spin_unlock_irqrestore(&sh_dmae_lock, flags);

	if (dmaor & (DMAOR_AE | DMAOR_NMIF)) {
		dev_warn(shdev->shdma_dev.dma_dev.dev, "Can't initialize DMAOR.\n");
		return -EIO;
	}
	if (shdev->pdata->dmaor_init & ~dmaor)
		dev_warn(shdev->shdma_dev.dma_dev.dev,
			 "DMAOR=0x%x hasn't latched the initial value 0x%x.\n",
			 dmaor, shdev->pdata->dmaor_init);
	return 0;
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 174) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 175) static bool dmae_is_busy(struct sh_dmae_chan *sh_chan)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 176) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 177) 	u32 chcr = chcr_read(sh_chan);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 178) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 179) 	if ((chcr & (CHCR_DE | CHCR_TE)) == CHCR_DE)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 180) 		return true; /* working */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 181) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 182) 	return false; /* waiting */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 183) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 184) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 185) static unsigned int calc_xmit_shift(struct sh_dmae_chan *sh_chan, u32 chcr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 186) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 187) 	struct sh_dmae_device *shdev = to_sh_dev(sh_chan);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 188) 	const struct sh_dmae_pdata *pdata = shdev->pdata;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 189) 	int cnt = ((chcr & pdata->ts_low_mask) >> pdata->ts_low_shift) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 190) 		((chcr & pdata->ts_high_mask) >> pdata->ts_high_shift);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 191) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 192) 	if (cnt >= pdata->ts_shift_num)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 193) 		cnt = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 194) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 195) 	return pdata->ts_shift[cnt];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 196) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 197) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 198) static u32 log2size_to_chcr(struct sh_dmae_chan *sh_chan, int l2size)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 199) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 200) 	struct sh_dmae_device *shdev = to_sh_dev(sh_chan);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 201) 	const struct sh_dmae_pdata *pdata = shdev->pdata;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 202) 	int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 203) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 204) 	for (i = 0; i < pdata->ts_shift_num; i++)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 205) 		if (pdata->ts_shift[i] == l2size)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 206) 			break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 207) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 208) 	if (i == pdata->ts_shift_num)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 209) 		i = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 210) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 211) 	return ((i << pdata->ts_low_shift) & pdata->ts_low_mask) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 212) 		((i << pdata->ts_high_shift) & pdata->ts_high_mask);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 213) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 214) 
/*
 * Program one hardware descriptor into the channel registers.  TCR is
 * counted in transfer units, so the byte count is scaled down by the
 * channel's current xmit_shift (derived from CHCR's TS field).
 */
static void dmae_set_reg(struct sh_dmae_chan *sh_chan, struct sh_dmae_regs *hw)
{
	sh_dmae_writel(sh_chan, hw->sar, SAR);	/* source address */
	sh_dmae_writel(sh_chan, hw->dar, DAR);	/* destination address */
	sh_dmae_writel(sh_chan, hw->tcr >> sh_chan->xmit_shift, TCR);
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 221) 
/*
 * Kick off the programmed transfer: set DE (DMA enable) and the
 * interrupt-enable bit, clearing any stale TE flag in the same write.
 */
static void dmae_start(struct sh_dmae_chan *sh_chan)
{
	struct sh_dmae_device *shdev = to_sh_dev(sh_chan);
	u32 chcr = chcr_read(sh_chan);

	/* Some DMACs (USB-DMAC, per the TEND define) need TEND set first */
	if (shdev->pdata->needs_tend_set)
		sh_dmae_writel(sh_chan, 0xFFFFFFFF, TEND);

	chcr |= CHCR_DE | shdev->chcr_ie_bit;
	chcr_write(sh_chan, chcr & ~CHCR_TE);
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 233) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 234) static void dmae_init(struct sh_dmae_chan *sh_chan)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 235) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 236) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 237) 	 * Default configuration for dual address memory-memory transfer.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 238) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 239) 	u32 chcr = DM_INC | SM_INC | RS_AUTO | log2size_to_chcr(sh_chan,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 240) 						   LOG2_DEFAULT_XFER_SIZE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 241) 	sh_chan->xmit_shift = calc_xmit_shift(sh_chan, chcr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 242) 	chcr_write(sh_chan, chcr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 243) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 244) 
/*
 * Load a new CHCR value and cache the transfer-size shift derived from
 * it.  Returns -EBUSY if the channel is currently transferring.
 */
static int dmae_set_chcr(struct sh_dmae_chan *sh_chan, u32 val)
{
	/* If DMA is active, cannot set CHCR. TODO: remove this superfluous check */
	if (dmae_is_busy(sh_chan))
		return -EBUSY;

	sh_chan->xmit_shift = calc_xmit_shift(sh_chan, val);
	chcr_write(sh_chan, val);

	return 0;
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 256) 
/*
 * Program the slave's MID/RID value into this channel's DMARS field.
 * The 16-bit DMARS register appears to be shared between channels:
 * 'shift' positions this channel's field and (0xff00 >> shift) masks
 * off the neighbouring field (presumably shift is 0 or 8 — confirm
 * against the DMAC manual).  Returns -EBUSY while the channel is
 * active, 0 otherwise (including when the DMAC has no DMARS at all).
 */
static int dmae_set_dmars(struct sh_dmae_chan *sh_chan, u16 val)
{
	struct sh_dmae_device *shdev = to_sh_dev(sh_chan);
	const struct sh_dmae_pdata *pdata = shdev->pdata;
	const struct sh_dmae_channel *chan_pdata = &pdata->channel[sh_chan->shdma_chan.id];
	void __iomem *addr = shdev->dmars;
	unsigned int shift = chan_pdata->dmars_bit;

	if (dmae_is_busy(sh_chan))
		return -EBUSY;

	/* This DMAC routes requests without DMARS - nothing to program */
	if (pdata->no_dmars)
		return 0;

	/* in the case of a missing DMARS resource use first memory window */
	if (!addr)
		addr = shdev->chan_reg;
	addr += chan_pdata->dmars;

	/* Read-modify-write: replace only this channel's field */
	__raw_writew((__raw_readw(addr) & (0xff00 >> shift)) | (val << shift),
		     addr);

	return 0;
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 281) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 282) static void sh_dmae_start_xfer(struct shdma_chan *schan,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 283) 			       struct shdma_desc *sdesc)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 284) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 285) 	struct sh_dmae_chan *sh_chan = container_of(schan, struct sh_dmae_chan,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 286) 						    shdma_chan);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 287) 	struct sh_dmae_desc *sh_desc = container_of(sdesc,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 288) 					struct sh_dmae_desc, shdma_desc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 289) 	dev_dbg(sh_chan->shdma_chan.dev, "Queue #%d to %d: %u@%x -> %x\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 290) 		sdesc->async_tx.cookie, sh_chan->shdma_chan.id,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 291) 		sh_desc->hw.tcr, sh_desc->hw.sar, sh_desc->hw.dar);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 292) 	/* Get the ld start address from ld_queue */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 293) 	dmae_set_reg(sh_chan, &sh_desc->hw);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 294) 	dmae_start(sh_chan);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 295) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 296) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 297) static bool sh_dmae_channel_busy(struct shdma_chan *schan)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 298) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 299) 	struct sh_dmae_chan *sh_chan = container_of(schan, struct sh_dmae_chan,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 300) 						    shdma_chan);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 301) 	return dmae_is_busy(sh_chan);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 302) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 303) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 304) static void sh_dmae_setup_xfer(struct shdma_chan *schan,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 305) 			       int slave_id)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 306) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 307) 	struct sh_dmae_chan *sh_chan = container_of(schan, struct sh_dmae_chan,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 308) 						    shdma_chan);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 309) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 310) 	if (slave_id >= 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 311) 		const struct sh_dmae_slave_config *cfg =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 312) 			sh_chan->config;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 313) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 314) 		dmae_set_dmars(sh_chan, cfg->mid_rid);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 315) 		dmae_set_chcr(sh_chan, cfg->chcr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 316) 	} else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 317) 		dmae_init(sh_chan);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 318) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 319) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 320) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 321) /*
 * Find a slave channel configuration from the controller list by either a slave
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 323)  * ID in the non-DT case, or by a MID/RID value in the DT case
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 324)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 325) static const struct sh_dmae_slave_config *dmae_find_slave(
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 326) 	struct sh_dmae_chan *sh_chan, int match)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 327) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 328) 	struct sh_dmae_device *shdev = to_sh_dev(sh_chan);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 329) 	const struct sh_dmae_pdata *pdata = shdev->pdata;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 330) 	const struct sh_dmae_slave_config *cfg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 331) 	int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 332) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 333) 	if (!sh_chan->shdma_chan.dev->of_node) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 334) 		if (match >= SH_DMA_SLAVE_NUMBER)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 335) 			return NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 336) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 337) 		for (i = 0, cfg = pdata->slave; i < pdata->slave_num; i++, cfg++)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 338) 			if (cfg->slave_id == match)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 339) 				return cfg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 340) 	} else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 341) 		for (i = 0, cfg = pdata->slave; i < pdata->slave_num; i++, cfg++)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 342) 			if (cfg->mid_rid == match) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 343) 				sh_chan->shdma_chan.slave_id = i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 344) 				return cfg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 345) 			}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 346) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 347) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 348) 	return NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 349) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 350) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 351) static int sh_dmae_set_slave(struct shdma_chan *schan,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 352) 			     int slave_id, dma_addr_t slave_addr, bool try)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 353) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 354) 	struct sh_dmae_chan *sh_chan = container_of(schan, struct sh_dmae_chan,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 355) 						    shdma_chan);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 356) 	const struct sh_dmae_slave_config *cfg = dmae_find_slave(sh_chan, slave_id);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 357) 	if (!cfg)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 358) 		return -ENXIO;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 359) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 360) 	if (!try) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 361) 		sh_chan->config = cfg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 362) 		sh_chan->slave_addr = slave_addr ? : cfg->addr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 363) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 364) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 365) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 366) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 367) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 368) static void dmae_halt(struct sh_dmae_chan *sh_chan)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 369) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 370) 	struct sh_dmae_device *shdev = to_sh_dev(sh_chan);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 371) 	u32 chcr = chcr_read(sh_chan);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 372) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 373) 	chcr &= ~(CHCR_DE | CHCR_TE | shdev->chcr_ie_bit);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 374) 	chcr_write(sh_chan, chcr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 375) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 376) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 377) static int sh_dmae_desc_setup(struct shdma_chan *schan,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 378) 			      struct shdma_desc *sdesc,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 379) 			      dma_addr_t src, dma_addr_t dst, size_t *len)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 380) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 381) 	struct sh_dmae_desc *sh_desc = container_of(sdesc,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 382) 					struct sh_dmae_desc, shdma_desc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 383) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 384) 	if (*len > schan->max_xfer_len)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 385) 		*len = schan->max_xfer_len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 386) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 387) 	sh_desc->hw.sar = src;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 388) 	sh_desc->hw.dar = dst;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 389) 	sh_desc->hw.tcr = *len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 390) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 391) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 392) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 393) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 394) static void sh_dmae_halt(struct shdma_chan *schan)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 395) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 396) 	struct sh_dmae_chan *sh_chan = container_of(schan, struct sh_dmae_chan,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 397) 						    shdma_chan);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 398) 	dmae_halt(sh_chan);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 399) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 400) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 401) static bool sh_dmae_chan_irq(struct shdma_chan *schan, int irq)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 402) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 403) 	struct sh_dmae_chan *sh_chan = container_of(schan, struct sh_dmae_chan,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 404) 						    shdma_chan);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 405) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 406) 	if (!(chcr_read(sh_chan) & CHCR_TE))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 407) 		return false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 408) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 409) 	/* DMA stop */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 410) 	dmae_halt(sh_chan);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 411) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 412) 	return true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 413) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 414) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 415) static size_t sh_dmae_get_partial(struct shdma_chan *schan,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 416) 				  struct shdma_desc *sdesc)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 417) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 418) 	struct sh_dmae_chan *sh_chan = container_of(schan, struct sh_dmae_chan,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 419) 						    shdma_chan);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 420) 	struct sh_dmae_desc *sh_desc = container_of(sdesc,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 421) 					struct sh_dmae_desc, shdma_desc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 422) 	return sh_desc->hw.tcr -
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 423) 		(sh_dmae_readl(sh_chan, TCR) << sh_chan->xmit_shift);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 424) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 425) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 426) /* Called from error IRQ or NMI */
/*
 * Full-controller recovery: stop the DMAC, let the shdma core fail all
 * queued descriptors on every channel, then re-initialise DMAOR.
 * Returns the value reported by shdma_reset().
 */
static bool sh_dmae_reset(struct sh_dmae_device *shdev)
{
	bool ret;

	/* halt the dma controller */
	sh_dmae_ctl_stop(shdev);

	/* We cannot detect, which channel caused the error, have to reset all */
	ret = shdma_reset(&shdev->shdma_dev);

	/* re-enable the controller with its initial DMAOR configuration */
	sh_dmae_rst(shdev);

	return ret;
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 441) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 442) static irqreturn_t sh_dmae_err(int irq, void *data)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 443) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 444) 	struct sh_dmae_device *shdev = data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 445) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 446) 	if (!(dmaor_read(shdev) & DMAOR_AE))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 447) 		return IRQ_NONE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 448) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 449) 	sh_dmae_reset(shdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 450) 	return IRQ_HANDLED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 451) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 452) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 453) static bool sh_dmae_desc_completed(struct shdma_chan *schan,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 454) 				   struct shdma_desc *sdesc)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 455) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 456) 	struct sh_dmae_chan *sh_chan = container_of(schan,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 457) 					struct sh_dmae_chan, shdma_chan);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 458) 	struct sh_dmae_desc *sh_desc = container_of(sdesc,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 459) 					struct sh_dmae_desc, shdma_desc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 460) 	u32 sar_buf = sh_dmae_readl(sh_chan, SAR);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 461) 	u32 dar_buf = sh_dmae_readl(sh_chan, DAR);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 462) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 463) 	return	(sdesc->direction == DMA_DEV_TO_MEM &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 464) 		 (sh_desc->hw.dar + sh_desc->hw.tcr) == dar_buf) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 465) 		(sdesc->direction != DMA_DEV_TO_MEM &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 466) 		 (sh_desc->hw.sar + sh_desc->hw.tcr) == sar_buf);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 467) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 468) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 469) static bool sh_dmae_nmi_notify(struct sh_dmae_device *shdev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 470) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 471) 	/* Fast path out if NMIF is not asserted for this controller */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 472) 	if ((dmaor_read(shdev) & DMAOR_NMIF) == 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 473) 		return false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 474) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 475) 	return sh_dmae_reset(shdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 476) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 477) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 478) static int sh_dmae_nmi_handler(struct notifier_block *self,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 479) 			       unsigned long cmd, void *data)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 480) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 481) 	struct sh_dmae_device *shdev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 482) 	int ret = NOTIFY_DONE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 483) 	bool triggered;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 484) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 485) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 486) 	 * Only concern ourselves with NMI events.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 487) 	 *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 488) 	 * Normally we would check the die chain value, but as this needs
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 489) 	 * to be architecture independent, check for NMI context instead.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 490) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 491) 	if (!in_nmi())
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 492) 		return NOTIFY_DONE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 493) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 494) 	rcu_read_lock();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 495) 	list_for_each_entry_rcu(shdev, &sh_dmae_devices, node) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 496) 		/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 497) 		 * Only stop if one of the controllers has NMIF asserted,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 498) 		 * we do not want to interfere with regular address error
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 499) 		 * handling or NMI events that don't concern the DMACs.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 500) 		 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 501) 		triggered = sh_dmae_nmi_notify(shdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 502) 		if (triggered == true)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 503) 			ret = NOTIFY_OK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 504) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 505) 	rcu_read_unlock();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 506) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 507) 	return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 508) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 509) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 510) static struct notifier_block sh_dmae_nmi_notifier __read_mostly = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 511) 	.notifier_call	= sh_dmae_nmi_handler,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 512) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 513) 	/* Run before NMI debug handler and KGDB */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 514) 	.priority	= 1,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 515) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 516) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 517) static int sh_dmae_chan_probe(struct sh_dmae_device *shdev, int id,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 518) 					int irq, unsigned long flags)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 519) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 520) 	const struct sh_dmae_channel *chan_pdata = &shdev->pdata->channel[id];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 521) 	struct shdma_dev *sdev = &shdev->shdma_dev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 522) 	struct platform_device *pdev = to_platform_device(sdev->dma_dev.dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 523) 	struct sh_dmae_chan *sh_chan;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 524) 	struct shdma_chan *schan;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 525) 	int err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 526) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 527) 	sh_chan = devm_kzalloc(sdev->dma_dev.dev, sizeof(struct sh_dmae_chan),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 528) 			       GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 529) 	if (!sh_chan)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 530) 		return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 531) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 532) 	schan = &sh_chan->shdma_chan;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 533) 	schan->max_xfer_len = SH_DMA_TCR_MAX + 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 534) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 535) 	shdma_chan_probe(sdev, schan, id);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 536) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 537) 	sh_chan->base = shdev->chan_reg + chan_pdata->offset;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 538) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 539) 	/* set up channel irq */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 540) 	if (pdev->id >= 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 541) 		snprintf(sh_chan->dev_id, sizeof(sh_chan->dev_id),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 542) 			 "sh-dmae%d.%d", pdev->id, id);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 543) 	else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 544) 		snprintf(sh_chan->dev_id, sizeof(sh_chan->dev_id),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 545) 			 "sh-dma%d", id);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 546) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 547) 	err = shdma_request_irq(schan, irq, flags, sh_chan->dev_id);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 548) 	if (err) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 549) 		dev_err(sdev->dma_dev.dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 550) 			"DMA channel %d request_irq error %d\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 551) 			id, err);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 552) 		goto err_no_irq;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 553) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 554) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 555) 	shdev->chan[id] = sh_chan;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 556) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 557) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 558) err_no_irq:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 559) 	/* remove from dmaengine device node */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 560) 	shdma_chan_remove(schan);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 561) 	return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 562) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 563) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 564) static void sh_dmae_chan_remove(struct sh_dmae_device *shdev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 565) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 566) 	struct shdma_chan *schan;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 567) 	int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 568) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 569) 	shdma_for_each_chan(schan, &shdev->shdma_dev, i) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 570) 		BUG_ON(!schan);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 571) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 572) 		shdma_chan_remove(schan);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 573) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 574) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 575) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 576) #ifdef CONFIG_PM
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 577) static int sh_dmae_runtime_suspend(struct device *dev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 578) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 579) 	struct sh_dmae_device *shdev = dev_get_drvdata(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 580) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 581) 	sh_dmae_ctl_stop(shdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 582) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 583) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 584) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 585) static int sh_dmae_runtime_resume(struct device *dev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 586) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 587) 	struct sh_dmae_device *shdev = dev_get_drvdata(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 588) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 589) 	return sh_dmae_rst(shdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 590) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 591) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 592) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 593) #ifdef CONFIG_PM_SLEEP
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 594) static int sh_dmae_suspend(struct device *dev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 595) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 596) 	struct sh_dmae_device *shdev = dev_get_drvdata(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 597) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 598) 	sh_dmae_ctl_stop(shdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 599) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 600) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 601) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 602) static int sh_dmae_resume(struct device *dev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 603) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 604) 	struct sh_dmae_device *shdev = dev_get_drvdata(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 605) 	int i, ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 606) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 607) 	ret = sh_dmae_rst(shdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 608) 	if (ret < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 609) 		dev_err(dev, "Failed to reset!\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 610) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 611) 	for (i = 0; i < shdev->pdata->channel_num; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 612) 		struct sh_dmae_chan *sh_chan = shdev->chan[i];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 613) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 614) 		if (!sh_chan->shdma_chan.desc_num)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 615) 			continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 616) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 617) 		if (sh_chan->shdma_chan.slave_id >= 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 618) 			const struct sh_dmae_slave_config *cfg = sh_chan->config;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 619) 			dmae_set_dmars(sh_chan, cfg->mid_rid);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 620) 			dmae_set_chcr(sh_chan, cfg->chcr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 621) 		} else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 622) 			dmae_init(sh_chan);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 623) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 624) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 625) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 626) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 627) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 628) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 629) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 630) static const struct dev_pm_ops sh_dmae_pm = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 631) 	SET_SYSTEM_SLEEP_PM_OPS(sh_dmae_suspend, sh_dmae_resume)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 632) 	SET_RUNTIME_PM_OPS(sh_dmae_runtime_suspend, sh_dmae_runtime_resume,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 633) 			   NULL)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 634) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 635) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 636) static dma_addr_t sh_dmae_slave_addr(struct shdma_chan *schan)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 637) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 638) 	struct sh_dmae_chan *sh_chan = container_of(schan,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 639) 					struct sh_dmae_chan, shdma_chan);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 640) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 641) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 642) 	 * Implicit BUG_ON(!sh_chan->config)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 643) 	 * This is an exclusive slave DMA operation, may only be called after a
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 644) 	 * successful slave configuration.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 645) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 646) 	return sh_chan->slave_addr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 647) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 648) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 649) static struct shdma_desc *sh_dmae_embedded_desc(void *buf, int i)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 650) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 651) 	return &((struct sh_dmae_desc *)buf)[i].shdma_desc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 652) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 653) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 654) static const struct shdma_ops sh_dmae_shdma_ops = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 655) 	.desc_completed = sh_dmae_desc_completed,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 656) 	.halt_channel = sh_dmae_halt,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 657) 	.channel_busy = sh_dmae_channel_busy,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 658) 	.slave_addr = sh_dmae_slave_addr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 659) 	.desc_setup = sh_dmae_desc_setup,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 660) 	.set_slave = sh_dmae_set_slave,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 661) 	.setup_xfer = sh_dmae_setup_xfer,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 662) 	.start_xfer = sh_dmae_start_xfer,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 663) 	.embedded_desc = sh_dmae_embedded_desc,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 664) 	.chan_irq = sh_dmae_chan_irq,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 665) 	.get_partial = sh_dmae_get_partial,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 666) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 667) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 668) static int sh_dmae_probe(struct platform_device *pdev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 669) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 670) 	const enum dma_slave_buswidth widths =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 671) 		DMA_SLAVE_BUSWIDTH_1_BYTE   | DMA_SLAVE_BUSWIDTH_2_BYTES |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 672) 		DMA_SLAVE_BUSWIDTH_4_BYTES  | DMA_SLAVE_BUSWIDTH_8_BYTES |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 673) 		DMA_SLAVE_BUSWIDTH_16_BYTES | DMA_SLAVE_BUSWIDTH_32_BYTES;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 674) 	const struct sh_dmae_pdata *pdata;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 675) 	unsigned long chan_flag[SH_DMAE_MAX_CHANNELS] = {};
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 676) 	int chan_irq[SH_DMAE_MAX_CHANNELS];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 677) 	unsigned long irqflags = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 678) 	int err, errirq, i, irq_cnt = 0, irqres = 0, irq_cap = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 679) 	struct sh_dmae_device *shdev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 680) 	struct dma_device *dma_dev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 681) 	struct resource *chan, *dmars, *errirq_res, *chanirq_res;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 682) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 683) 	if (pdev->dev.of_node)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 684) 		pdata = of_device_get_match_data(&pdev->dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 685) 	else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 686) 		pdata = dev_get_platdata(&pdev->dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 687) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 688) 	/* get platform data */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 689) 	if (!pdata || !pdata->channel_num)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 690) 		return -ENODEV;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 691) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 692) 	chan = platform_get_resource(pdev, IORESOURCE_MEM, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 693) 	/* DMARS area is optional */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 694) 	dmars = platform_get_resource(pdev, IORESOURCE_MEM, 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 695) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 696) 	 * IRQ resources:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 697) 	 * 1. there always must be at least one IRQ IO-resource. On SH4 it is
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 698) 	 *    the error IRQ, in which case it is the only IRQ in this resource:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 699) 	 *    start == end. If it is the only IRQ resource, all channels also
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 700) 	 *    use the same IRQ.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 701) 	 * 2. DMA channel IRQ resources can be specified one per resource or in
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 702) 	 *    ranges (start != end)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 703) 	 * 3. iff all events (channels and, optionally, error) on this
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 704) 	 *    controller use the same IRQ, only one IRQ resource can be
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 705) 	 *    specified, otherwise there must be one IRQ per channel, even if
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 706) 	 *    some of them are equal
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 707) 	 * 4. if all IRQs on this controller are equal or if some specific IRQs
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 708) 	 *    specify IORESOURCE_IRQ_SHAREABLE in their resources, they will be
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 709) 	 *    requested with the IRQF_SHARED flag
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 710) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 711) 	errirq_res = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 712) 	if (!chan || !errirq_res)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 713) 		return -ENODEV;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 714) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 715) 	shdev = devm_kzalloc(&pdev->dev, sizeof(struct sh_dmae_device),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 716) 			     GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 717) 	if (!shdev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 718) 		return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 719) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 720) 	dma_dev = &shdev->shdma_dev.dma_dev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 721) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 722) 	shdev->chan_reg = devm_ioremap_resource(&pdev->dev, chan);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 723) 	if (IS_ERR(shdev->chan_reg))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 724) 		return PTR_ERR(shdev->chan_reg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 725) 	if (dmars) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 726) 		shdev->dmars = devm_ioremap_resource(&pdev->dev, dmars);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 727) 		if (IS_ERR(shdev->dmars))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 728) 			return PTR_ERR(shdev->dmars);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 729) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 730) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 731) 	dma_dev->src_addr_widths = widths;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 732) 	dma_dev->dst_addr_widths = widths;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 733) 	dma_dev->directions = BIT(DMA_MEM_TO_DEV) | BIT(DMA_DEV_TO_MEM);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 734) 	dma_dev->residue_granularity = DMA_RESIDUE_GRANULARITY_DESCRIPTOR;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 735) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 736) 	if (!pdata->slave_only)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 737) 		dma_cap_set(DMA_MEMCPY, dma_dev->cap_mask);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 738) 	if (pdata->slave && pdata->slave_num)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 739) 		dma_cap_set(DMA_SLAVE, dma_dev->cap_mask);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 740) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 741) 	/* Default transfer size of 32 bytes requires 32-byte alignment */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 742) 	dma_dev->copy_align = LOG2_DEFAULT_XFER_SIZE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 743) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 744) 	shdev->shdma_dev.ops = &sh_dmae_shdma_ops;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 745) 	shdev->shdma_dev.desc_size = sizeof(struct sh_dmae_desc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 746) 	err = shdma_init(&pdev->dev, &shdev->shdma_dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 747) 			      pdata->channel_num);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 748) 	if (err < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 749) 		goto eshdma;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 750) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 751) 	/* platform data */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 752) 	shdev->pdata = pdata;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 753) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 754) 	if (pdata->chcr_offset)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 755) 		shdev->chcr_offset = pdata->chcr_offset;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 756) 	else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 757) 		shdev->chcr_offset = CHCR;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 758) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 759) 	if (pdata->chcr_ie_bit)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 760) 		shdev->chcr_ie_bit = pdata->chcr_ie_bit;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 761) 	else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 762) 		shdev->chcr_ie_bit = CHCR_IE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 763) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 764) 	platform_set_drvdata(pdev, shdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 765) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 766) 	pm_runtime_enable(&pdev->dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 767) 	err = pm_runtime_get_sync(&pdev->dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 768) 	if (err < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 769) 		dev_err(&pdev->dev, "%s(): GET = %d\n", __func__, err);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 770) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 771) 	spin_lock_irq(&sh_dmae_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 772) 	list_add_tail_rcu(&shdev->node, &sh_dmae_devices);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 773) 	spin_unlock_irq(&sh_dmae_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 774) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 775) 	/* reset dma controller - only needed as a test */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 776) 	err = sh_dmae_rst(shdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 777) 	if (err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 778) 		goto rst_err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 779) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 780) 	if (IS_ENABLED(CONFIG_CPU_SH4) || IS_ENABLED(CONFIG_ARCH_RENESAS)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 781) 		chanirq_res = platform_get_resource(pdev, IORESOURCE_IRQ, 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 782) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 783) 		if (!chanirq_res)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 784) 			chanirq_res = errirq_res;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 785) 		else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 786) 			irqres++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 787) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 788) 		if (chanirq_res == errirq_res ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 789) 		    (errirq_res->flags & IORESOURCE_BITS) == IORESOURCE_IRQ_SHAREABLE)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 790) 			irqflags = IRQF_SHARED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 791) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 792) 		errirq = errirq_res->start;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 793) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 794) 		err = devm_request_irq(&pdev->dev, errirq, sh_dmae_err,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 795) 				       irqflags, "DMAC Address Error", shdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 796) 		if (err) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 797) 			dev_err(&pdev->dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 798) 				"DMA failed requesting irq #%d, error %d\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 799) 				errirq, err);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 800) 			goto eirq_err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 801) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 802) 	} else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 803) 		chanirq_res = errirq_res;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 804) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 805) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 806) 	if (chanirq_res->start == chanirq_res->end &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 807) 	    !platform_get_resource(pdev, IORESOURCE_IRQ, 1)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 808) 		/* Special case - all multiplexed */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 809) 		for (; irq_cnt < pdata->channel_num; irq_cnt++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 810) 			if (irq_cnt < SH_DMAE_MAX_CHANNELS) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 811) 				chan_irq[irq_cnt] = chanirq_res->start;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 812) 				chan_flag[irq_cnt] = IRQF_SHARED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 813) 			} else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 814) 				irq_cap = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 815) 				break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 816) 			}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 817) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 818) 	} else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 819) 		do {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 820) 			for (i = chanirq_res->start; i <= chanirq_res->end; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 821) 				if (irq_cnt >= SH_DMAE_MAX_CHANNELS) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 822) 					irq_cap = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 823) 					break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 824) 				}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 825) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 826) 				if ((errirq_res->flags & IORESOURCE_BITS) ==
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 827) 				    IORESOURCE_IRQ_SHAREABLE)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 828) 					chan_flag[irq_cnt] = IRQF_SHARED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 829) 				else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 830) 					chan_flag[irq_cnt] = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 831) 				dev_dbg(&pdev->dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 832) 					"Found IRQ %d for channel %d\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 833) 					i, irq_cnt);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 834) 				chan_irq[irq_cnt++] = i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 835) 			}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 836) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 837) 			if (irq_cnt >= SH_DMAE_MAX_CHANNELS)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 838) 				break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 839) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 840) 			chanirq_res = platform_get_resource(pdev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 841) 						IORESOURCE_IRQ, ++irqres);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 842) 		} while (irq_cnt < pdata->channel_num && chanirq_res);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 843) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 844) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 845) 	/* Create DMA Channel */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 846) 	for (i = 0; i < irq_cnt; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 847) 		err = sh_dmae_chan_probe(shdev, i, chan_irq[i], chan_flag[i]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 848) 		if (err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 849) 			goto chan_probe_err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 850) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 851) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 852) 	if (irq_cap)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 853) 		dev_notice(&pdev->dev, "Attempting to register %d DMA "
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 854) 			   "channels when a maximum of %d are supported.\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 855) 			   pdata->channel_num, SH_DMAE_MAX_CHANNELS);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 856) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 857) 	pm_runtime_put(&pdev->dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 858) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 859) 	err = dma_async_device_register(&shdev->shdma_dev.dma_dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 860) 	if (err < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 861) 		goto edmadevreg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 862) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 863) 	return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 864) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 865) edmadevreg:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 866) 	pm_runtime_get(&pdev->dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 867) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 868) chan_probe_err:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 869) 	sh_dmae_chan_remove(shdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 870) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 871) eirq_err:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 872) rst_err:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 873) 	spin_lock_irq(&sh_dmae_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 874) 	list_del_rcu(&shdev->node);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 875) 	spin_unlock_irq(&sh_dmae_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 876) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 877) 	pm_runtime_put(&pdev->dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 878) 	pm_runtime_disable(&pdev->dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 879) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 880) 	shdma_cleanup(&shdev->shdma_dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 881) eshdma:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 882) 	synchronize_rcu();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 883) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 884) 	return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 885) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 886) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 887) static int sh_dmae_remove(struct platform_device *pdev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 888) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 889) 	struct sh_dmae_device *shdev = platform_get_drvdata(pdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 890) 	struct dma_device *dma_dev = &shdev->shdma_dev.dma_dev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 891) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 892) 	dma_async_device_unregister(dma_dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 893) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 894) 	spin_lock_irq(&sh_dmae_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 895) 	list_del_rcu(&shdev->node);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 896) 	spin_unlock_irq(&sh_dmae_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 897) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 898) 	pm_runtime_disable(&pdev->dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 899) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 900) 	sh_dmae_chan_remove(shdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 901) 	shdma_cleanup(&shdev->shdma_dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 902) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 903) 	synchronize_rcu();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 904) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 905) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 906) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 907) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 908) static struct platform_driver sh_dmae_driver = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 909) 	.driver		= {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 910) 		.pm	= &sh_dmae_pm,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 911) 		.name	= SH_DMAE_DRV_NAME,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 912) 	},
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 913) 	.remove		= sh_dmae_remove,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 914) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 915) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 916) static int __init sh_dmae_init(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 917) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 918) 	/* Wire up NMI handling */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 919) 	int err = register_die_notifier(&sh_dmae_nmi_notifier);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 920) 	if (err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 921) 		return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 922) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 923) 	return platform_driver_probe(&sh_dmae_driver, sh_dmae_probe);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 924) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 925) module_init(sh_dmae_init);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 926) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 927) static void __exit sh_dmae_exit(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 928) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 929) 	platform_driver_unregister(&sh_dmae_driver);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 930) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 931) 	unregister_die_notifier(&sh_dmae_nmi_notifier);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 932) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 933) module_exit(sh_dmae_exit);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 934) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 935) MODULE_AUTHOR("Nobuhiro Iwamatsu <iwamatsu.nobuhiro@renesas.com>");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 936) MODULE_DESCRIPTION("Renesas SH DMA Engine driver");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 937) MODULE_LICENSE("GPL");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 938) MODULE_ALIAS("platform:" SH_DMAE_DRV_NAME);