Orange Pi5 kernel

Deprecated Linux kernel 5.10.110 for OrangePi 5/5B/5+ boards

3 Commits   0 Branches   0 Tags
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   1) // SPDX-License-Identifier: GPL-2.0
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   2) //
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   3) // Renesas R-Car Audio DMAC support
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   4) //
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   5) // Copyright (C) 2015 Renesas Electronics Corp.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   6) // Copyright (c) 2015 Kuninori Morimoto <kuninori.morimoto.gx@renesas.com>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   7) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   8) #include <linux/delay.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   9) #include <linux/of_dma.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  10) #include "rsnd.h"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  11) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  12) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  13)  * Audio DMAC peri peri register
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  14)  */
#define PDMASAR		0x00	/* source address (written in rsnd_dmapp_start) */
#define PDMADAR		0x04	/* destination address */
#define PDMACHCR	0x0c	/* channel control */

/* PDMACHCR */
#define PDMACHCR_DE		(1 << 0)	/* transfer enable; cleared to stop the channel */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  21) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  22) 
/* per-stream state for DMAEngine-based (Audio DMAC) transfers */
struct rsnd_dmaen {
	struct dma_chan		*chan;		/* channel requested in prepare, released in cleanup */
	dma_cookie_t		cookie;		/* cookie returned by dmaengine_submit() */
	unsigned int		dma_len;	/* cyclic buffer length in bytes */
};
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  28) 
/* per-stream state for Audio DMAC peri peri transfers */
struct rsnd_dmapp {
	int			dmapp_id;	/* channel index; selects the 0x10-byte register block */
	u32			chcr;		/* PDMACHCR value for this route — set outside this chunk */
};
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  33) 
/*
 * A DMA route between two mods (or a mod and memory).
 * The union reflects that a given route is driven either by the
 * DMAEngine path (en) or the peri-peri path (pp), never both.
 */
struct rsnd_dma {
	struct rsnd_mod		mod;		/* this DMA exposed as a mod itself */
	struct rsnd_mod		*mod_from;	/* transfer source endpoint */
	struct rsnd_mod		*mod_to;	/* transfer destination endpoint */
	dma_addr_t		src_addr;
	dma_addr_t		dst_addr;
	union {
		struct rsnd_dmaen en;
		struct rsnd_dmapp pp;
	} dma;
};
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  45) 
/* driver-wide DMA controller state (stored in priv->dma) */
struct rsnd_dma_ctrl {
	void __iomem *base;	/* mapped Audio DMAC peri peri register base */
	int dmaen_num;		/* count of attached DMAEngine routes */
	int dmapp_num;		/* count of attached peri-peri routes */
};
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  51) 
/* accessor helpers for the container structures above */
#define rsnd_priv_to_dmac(p)	((struct rsnd_dma_ctrl *)(p)->dma)
#define rsnd_mod_to_dma(_mod) container_of((_mod), struct rsnd_dma, mod)
#define rsnd_dma_to_dmaen(dma)	(&(dma)->dma.en)
#define rsnd_dma_to_dmapp(dma)	(&(dma)->dma.pp)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  56) 
/* for DEBUG */
static struct rsnd_mod_ops mem_ops = {
	.name = "mem",
};

/* dummy mod — NOTE(review): appears to stand for the memory endpoint
 * of a DMA route; confirm against the rsnd_dma_attach() caller */
static struct rsnd_mod mem = {
};
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  64) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  65) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  66)  *		Audio DMAC
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  67)  */
/* notify ALSA of an elapsed period, but only while the stream is active */
static void __rsnd_dmaen_complete(struct rsnd_mod *mod,
				  struct rsnd_dai_stream *io)
{
	if (!rsnd_io_is_working(io))
		return;

	rsnd_dai_period_elapsed(io);
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  74) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  75) static void rsnd_dmaen_complete(void *data)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  76) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  77) 	struct rsnd_mod *mod = data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  78) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  79) 	rsnd_mod_interrupt(mod, __rsnd_dmaen_complete);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  80) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  81) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  82) static struct dma_chan *rsnd_dmaen_request_channel(struct rsnd_dai_stream *io,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  83) 						   struct rsnd_mod *mod_from,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  84) 						   struct rsnd_mod *mod_to)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  85) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  86) 	if ((!mod_from && !mod_to) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  87) 	    (mod_from && mod_to))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  88) 		return NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  89) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  90) 	if (mod_from)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  91) 		return rsnd_mod_dma_req(io, mod_from);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  92) 	else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  93) 		return rsnd_mod_dma_req(io, mod_to);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  94) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  95) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  96) static int rsnd_dmaen_stop(struct rsnd_mod *mod,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  97) 			   struct rsnd_dai_stream *io,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  98) 			   struct rsnd_priv *priv)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  99) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 100) 	struct rsnd_dma *dma = rsnd_mod_to_dma(mod);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 101) 	struct rsnd_dmaen *dmaen = rsnd_dma_to_dmaen(dma);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 102) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 103) 	if (dmaen->chan)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 104) 		dmaengine_terminate_all(dmaen->chan);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 105) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 106) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 107) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 108) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 109) static int rsnd_dmaen_cleanup(struct rsnd_mod *mod,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 110) 			      struct rsnd_dai_stream *io,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 111) 			      struct rsnd_priv *priv)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 112) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 113) 	struct rsnd_dma *dma = rsnd_mod_to_dma(mod);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 114) 	struct rsnd_dmaen *dmaen = rsnd_dma_to_dmaen(dma);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 115) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 116) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 117) 	 * DMAEngine release uses mutex lock.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 118) 	 * Thus, it shouldn't be called under spinlock.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 119) 	 * Let's call it under prepare
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 120) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 121) 	if (dmaen->chan)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 122) 		dma_release_channel(dmaen->chan);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 123) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 124) 	dmaen->chan = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 125) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 126) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 127) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 128) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 129) static int rsnd_dmaen_prepare(struct rsnd_mod *mod,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 130) 			      struct rsnd_dai_stream *io,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 131) 			      struct rsnd_priv *priv)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 132) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 133) 	struct rsnd_dma *dma = rsnd_mod_to_dma(mod);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 134) 	struct rsnd_dmaen *dmaen = rsnd_dma_to_dmaen(dma);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 135) 	struct device *dev = rsnd_priv_to_dev(priv);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 136) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 137) 	/* maybe suspended */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 138) 	if (dmaen->chan)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 139) 		return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 140) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 141) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 142) 	 * DMAEngine request uses mutex lock.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 143) 	 * Thus, it shouldn't be called under spinlock.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 144) 	 * Let's call it under prepare
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 145) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 146) 	dmaen->chan = rsnd_dmaen_request_channel(io,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 147) 						 dma->mod_from,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 148) 						 dma->mod_to);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 149) 	if (IS_ERR_OR_NULL(dmaen->chan)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 150) 		dmaen->chan = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 151) 		dev_err(dev, "can't get dma channel\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 152) 		return -EIO;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 153) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 154) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 155) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 156) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 157) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 158) static int rsnd_dmaen_start(struct rsnd_mod *mod,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 159) 			    struct rsnd_dai_stream *io,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 160) 			    struct rsnd_priv *priv)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 161) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 162) 	struct rsnd_dma *dma = rsnd_mod_to_dma(mod);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 163) 	struct rsnd_dmaen *dmaen = rsnd_dma_to_dmaen(dma);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 164) 	struct snd_pcm_substream *substream = io->substream;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 165) 	struct device *dev = rsnd_priv_to_dev(priv);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 166) 	struct dma_async_tx_descriptor *desc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 167) 	struct dma_slave_config cfg = {};
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 168) 	enum dma_slave_buswidth buswidth = DMA_SLAVE_BUSWIDTH_4_BYTES;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 169) 	int is_play = rsnd_io_is_play(io);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 170) 	int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 171) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 172) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 173) 	 * in case of monaural data writing or reading through Audio-DMAC
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 174) 	 * data is always in Left Justified format, so both src and dst
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 175) 	 * DMA Bus width need to be set equal to physical data width.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 176) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 177) 	if (rsnd_runtime_channel_original(io) == 1) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 178) 		struct snd_pcm_runtime *runtime = rsnd_io_to_runtime(io);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 179) 		int bits = snd_pcm_format_physical_width(runtime->format);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 180) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 181) 		switch (bits) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 182) 		case 8:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 183) 			buswidth = DMA_SLAVE_BUSWIDTH_1_BYTE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 184) 			break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 185) 		case 16:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 186) 			buswidth = DMA_SLAVE_BUSWIDTH_2_BYTES;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 187) 			break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 188) 		case 32:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 189) 			buswidth = DMA_SLAVE_BUSWIDTH_4_BYTES;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 190) 			break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 191) 		default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 192) 			dev_err(dev, "invalid format width %d\n", bits);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 193) 			return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 194) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 195) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 196) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 197) 	cfg.direction	= is_play ? DMA_MEM_TO_DEV : DMA_DEV_TO_MEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 198) 	cfg.src_addr	= dma->src_addr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 199) 	cfg.dst_addr	= dma->dst_addr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 200) 	cfg.src_addr_width = buswidth;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 201) 	cfg.dst_addr_width = buswidth;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 202) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 203) 	dev_dbg(dev, "%s %pad -> %pad\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 204) 		rsnd_mod_name(mod),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 205) 		&cfg.src_addr, &cfg.dst_addr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 206) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 207) 	ret = dmaengine_slave_config(dmaen->chan, &cfg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 208) 	if (ret < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 209) 		return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 210) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 211) 	desc = dmaengine_prep_dma_cyclic(dmaen->chan,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 212) 					 substream->runtime->dma_addr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 213) 					 snd_pcm_lib_buffer_bytes(substream),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 214) 					 snd_pcm_lib_period_bytes(substream),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 215) 					 is_play ? DMA_MEM_TO_DEV : DMA_DEV_TO_MEM,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 216) 					 DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 217) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 218) 	if (!desc) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 219) 		dev_err(dev, "dmaengine_prep_slave_sg() fail\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 220) 		return -EIO;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 221) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 222) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 223) 	desc->callback		= rsnd_dmaen_complete;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 224) 	desc->callback_param	= rsnd_mod_get(dma);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 225) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 226) 	dmaen->dma_len		= snd_pcm_lib_buffer_bytes(substream);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 227) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 228) 	dmaen->cookie = dmaengine_submit(desc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 229) 	if (dmaen->cookie < 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 230) 		dev_err(dev, "dmaengine_submit() fail\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 231) 		return -EIO;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 232) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 233) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 234) 	dma_async_issue_pending(dmaen->chan);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 235) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 236) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 237) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 238) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 239) struct dma_chan *rsnd_dma_request_channel(struct device_node *of_node,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 240) 					  struct rsnd_mod *mod, char *name)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 241) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 242) 	struct dma_chan *chan = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 243) 	struct device_node *np;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 244) 	int i = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 245) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 246) 	for_each_child_of_node(of_node, np) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 247) 		if (i == rsnd_mod_id_raw(mod) && (!chan))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 248) 			chan = of_dma_request_slave_channel(np, name);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 249) 		i++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 250) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 251) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 252) 	/* It should call of_node_put(), since, it is rsnd_xxx_of_node() */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 253) 	of_node_put(of_node);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 254) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 255) 	return chan;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 256) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 257) 
/*
 * Probe-time check that a DMAEngine channel exists for this route.
 * The channel is requested only to verify availability (and to record
 * the DMAC device for IPMMU), then released again; the runtime channel
 * is re-requested in rsnd_dmaen_prepare().
 * Returns 0, -EPROBE_DEFER, or -EAGAIN to request PIO fallback.
 */
static int rsnd_dmaen_attach(struct rsnd_dai_stream *io,
			   struct rsnd_dma *dma,
			   struct rsnd_mod *mod_from, struct rsnd_mod *mod_to)
{
	struct rsnd_priv *priv = rsnd_io_to_priv(io);
	struct rsnd_dma_ctrl *dmac = rsnd_priv_to_dmac(priv);
	struct dma_chan *chan;

	/* try to get DMAEngine channel */
	chan = rsnd_dmaen_request_channel(io, mod_from, mod_to);
	if (IS_ERR_OR_NULL(chan)) {
		/* Let's follow when -EPROBE_DEFER case */
		/* (PTR_ERR(NULL) is 0, so a NULL chan skips this and
		 * falls through to the -EAGAIN fallback below) */
		if (PTR_ERR(chan) == -EPROBE_DEFER)
			return PTR_ERR(chan);

		/*
		 * DMA failed. try to PIO mode
		 * see
		 *	rsnd_ssi_fallback()
		 *	rsnd_rdai_continuance_probe()
		 */
		return -EAGAIN;
	}

	/*
	 * use it for IPMMU if needed
	 * see
	 *	rsnd_preallocate_pages()
	 */
	io->dmac_dev = chan->device->dev;

	dma_release_channel(chan);

	dmac->dmaen_num++;

	return 0;
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 295) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 296) static int rsnd_dmaen_pointer(struct rsnd_mod *mod,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 297) 			      struct rsnd_dai_stream *io,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 298) 			      snd_pcm_uframes_t *pointer)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 299) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 300) 	struct snd_pcm_runtime *runtime = rsnd_io_to_runtime(io);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 301) 	struct rsnd_dma *dma = rsnd_mod_to_dma(mod);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 302) 	struct rsnd_dmaen *dmaen = rsnd_dma_to_dmaen(dma);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 303) 	struct dma_tx_state state;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 304) 	enum dma_status status;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 305) 	unsigned int pos = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 306) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 307) 	status = dmaengine_tx_status(dmaen->chan, dmaen->cookie, &state);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 308) 	if (status == DMA_IN_PROGRESS || status == DMA_PAUSED) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 309) 		if (state.residue > 0 && state.residue <= dmaen->dma_len)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 310) 			pos = dmaen->dma_len - state.residue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 311) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 312) 	*pointer = bytes_to_frames(runtime, pos);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 313) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 314) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 315) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 316) 
/* mod callbacks for the DMAEngine (Audio DMAC) transfer path */
static struct rsnd_mod_ops rsnd_dmaen_ops = {
	.name		= "audmac",
	.prepare	= rsnd_dmaen_prepare,
	.cleanup	= rsnd_dmaen_cleanup,
	.start		= rsnd_dmaen_start,
	.stop		= rsnd_dmaen_stop,
	.pointer	= rsnd_dmaen_pointer,
	.get_status	= rsnd_mod_get_status,
};
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 326) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 327) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 328)  *		Audio DMAC peri peri
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 329)  */
/* resource IDs indexed by (SSI id * 8) + busif; see rsnd_dmapp_get_id() */
static const u8 gen2_id_table_ssiu[] = {
	/* SSI00 ~ SSI07 */
	0x00, 0x01, 0x02, 0x03, 0x39, 0x3a, 0x3b, 0x3c,
	/* SSI10 ~ SSI17 */
	0x04, 0x05, 0x06, 0x07, 0x3d, 0x3e, 0x3f, 0x40,
	/* SSI20 ~ SSI27 */
	0x08, 0x09, 0x0a, 0x0b, 0x41, 0x42, 0x43, 0x44,
	/* SSI30 ~ SSI37 */
	0x0c, 0x45, 0x46, 0x47, 0x48, 0x49, 0x4a, 0x4b,
	/* SSI40 ~ SSI47 */
	0x0d, 0x4c, 0x4d, 0x4e, 0x4f, 0x50, 0x51, 0x52,
	/* SSI5 */
	0x0e, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
	/* SSI6 */
	0x0f, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
	/* SSI7 */
	0x10, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
	/* SSI8 */
	0x11, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
	/* SSI90 ~ SSI97 */
	0x12, 0x13, 0x14, 0x15, 0x53, 0x54, 0x55, 0x56,
};
/* resource IDs indexed by SRC mod id; see rsnd_dmapp_get_id() */
static const u8 gen2_id_table_scu[] = {
	0x2d, /* SCU_SRCI0 */
	0x2e, /* SCU_SRCI1 */
	0x2f, /* SCU_SRCI2 */
	0x30, /* SCU_SRCI3 */
	0x31, /* SCU_SRCI4 */
	0x32, /* SCU_SRCI5 */
	0x33, /* SCU_SRCI6 */
	0x34, /* SCU_SRCI7 */
	0x35, /* SCU_SRCI8 */
	0x36, /* SCU_SRCI9 */
};
/* resource IDs indexed by DVC/CMD mod id; see rsnd_dmapp_get_id() */
static const u8 gen2_id_table_cmd[] = {
	0x37, /* SCU_CMD0 */
	0x38, /* SCU_CMD1 */
};
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 368) 
/*
 * Look up the Audio DMAC peri peri resource ID for @mod on stream @io
 * from the gen2 tables above.  On an unknown connection, logs an error
 * and returns 0x00 (SSI00) as a non-prohibited fallback.
 */
static u32 rsnd_dmapp_get_id(struct rsnd_dai_stream *io,
			     struct rsnd_mod *mod)
{
	struct rsnd_mod *ssi = rsnd_io_to_mod_ssi(io);
	struct rsnd_mod *ssiu = rsnd_io_to_mod_ssiu(io);
	struct rsnd_mod *src = rsnd_io_to_mod_src(io);
	struct rsnd_mod *dvc = rsnd_io_to_mod_dvc(io);
	const u8 *entry = NULL;
	int id = 255;	/* out-of-range default: forces the error path below */
	int size = 0;

	if ((mod == ssi) ||
	    (mod == ssiu)) {
		/* SSIU table is laid out as 8 busif slots per SSI */
		int busif = rsnd_mod_id_sub(ssiu);

		entry = gen2_id_table_ssiu;
		size = ARRAY_SIZE(gen2_id_table_ssiu);
		id = (rsnd_mod_id(mod) * 8) + busif;
	} else if (mod == src) {
		entry = gen2_id_table_scu;
		size = ARRAY_SIZE(gen2_id_table_scu);
		id = rsnd_mod_id(mod);
	} else if (mod == dvc) {
		entry = gen2_id_table_cmd;
		size = ARRAY_SIZE(gen2_id_table_cmd);
		id = rsnd_mod_id(mod);
	}

	if ((!entry) || (size <= id)) {
		struct device *dev = rsnd_priv_to_dev(rsnd_io_to_priv(io));

		dev_err(dev, "unknown connection (%s)\n", rsnd_mod_name(mod));

		/* use non-prohibited SRS number as error */
		return 0x00; /* SSI00 */
	}

	return entry[id];
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 408) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 409) static u32 rsnd_dmapp_get_chcr(struct rsnd_dai_stream *io,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 410) 			       struct rsnd_mod *mod_from,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 411) 			       struct rsnd_mod *mod_to)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 412) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 413) 	return	(rsnd_dmapp_get_id(io, mod_from) << 24) +
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 414) 		(rsnd_dmapp_get_id(io, mod_to) << 16);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 415) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 416) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 417) #define rsnd_dmapp_addr(dmac, dma, reg) \
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 418) 	(dmac->base + 0x20 + reg + \
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 419) 	 (0x10 * rsnd_dma_to_dmapp(dma)->dmapp_id))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 420) static void rsnd_dmapp_write(struct rsnd_dma *dma, u32 data, u32 reg)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 421) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 422) 	struct rsnd_mod *mod = rsnd_mod_get(dma);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 423) 	struct rsnd_priv *priv = rsnd_mod_to_priv(mod);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 424) 	struct rsnd_dma_ctrl *dmac = rsnd_priv_to_dmac(priv);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 425) 	struct device *dev = rsnd_priv_to_dev(priv);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 426) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 427) 	dev_dbg(dev, "w 0x%px : %08x\n", rsnd_dmapp_addr(dmac, dma, reg), data);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 428) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 429) 	iowrite32(data, rsnd_dmapp_addr(dmac, dma, reg));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 430) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 431) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 432) static u32 rsnd_dmapp_read(struct rsnd_dma *dma, u32 reg)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 433) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 434) 	struct rsnd_mod *mod = rsnd_mod_get(dma);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 435) 	struct rsnd_priv *priv = rsnd_mod_to_priv(mod);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 436) 	struct rsnd_dma_ctrl *dmac = rsnd_priv_to_dmac(priv);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 437) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 438) 	return ioread32(rsnd_dmapp_addr(dmac, dma, reg));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 439) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 440) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 441) static void rsnd_dmapp_bset(struct rsnd_dma *dma, u32 data, u32 mask, u32 reg)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 442) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 443) 	struct rsnd_mod *mod = rsnd_mod_get(dma);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 444) 	struct rsnd_priv *priv = rsnd_mod_to_priv(mod);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 445) 	struct rsnd_dma_ctrl *dmac = rsnd_priv_to_dmac(priv);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 446) 	void __iomem *addr = rsnd_dmapp_addr(dmac, dma, reg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 447) 	u32 val = ioread32(addr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 448) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 449) 	val &= ~mask;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 450) 	val |= (data & mask);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 451) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 452) 	iowrite32(val, addr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 453) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 454) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 455) static int rsnd_dmapp_stop(struct rsnd_mod *mod,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 456) 			   struct rsnd_dai_stream *io,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 457) 			   struct rsnd_priv *priv)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 458) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 459) 	struct rsnd_dma *dma = rsnd_mod_to_dma(mod);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 460) 	int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 461) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 462) 	rsnd_dmapp_bset(dma, 0,  PDMACHCR_DE, PDMACHCR);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 463) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 464) 	for (i = 0; i < 1024; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 465) 		if (0 == (rsnd_dmapp_read(dma, PDMACHCR) & PDMACHCR_DE))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 466) 			return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 467) 		udelay(1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 468) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 469) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 470) 	return -EIO;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 471) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 472) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 473) static int rsnd_dmapp_start(struct rsnd_mod *mod,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 474) 			    struct rsnd_dai_stream *io,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 475) 			    struct rsnd_priv *priv)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 476) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 477) 	struct rsnd_dma *dma = rsnd_mod_to_dma(mod);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 478) 	struct rsnd_dmapp *dmapp = rsnd_dma_to_dmapp(dma);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 479) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 480) 	rsnd_dmapp_write(dma, dma->src_addr,	PDMASAR);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 481) 	rsnd_dmapp_write(dma, dma->dst_addr,	PDMADAR);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 482) 	rsnd_dmapp_write(dma, dmapp->chcr,	PDMACHCR);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 483) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 484) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 485) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 486) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 487) static int rsnd_dmapp_attach(struct rsnd_dai_stream *io,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 488) 			     struct rsnd_dma *dma,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 489) 			     struct rsnd_mod *mod_from, struct rsnd_mod *mod_to)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 490) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 491) 	struct rsnd_dmapp *dmapp = rsnd_dma_to_dmapp(dma);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 492) 	struct rsnd_priv *priv = rsnd_io_to_priv(io);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 493) 	struct rsnd_dma_ctrl *dmac = rsnd_priv_to_dmac(priv);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 494) 	struct device *dev = rsnd_priv_to_dev(priv);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 495) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 496) 	dmapp->dmapp_id = dmac->dmapp_num;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 497) 	dmapp->chcr = rsnd_dmapp_get_chcr(io, mod_from, mod_to) | PDMACHCR_DE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 498) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 499) 	dmac->dmapp_num++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 500) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 501) 	dev_dbg(dev, "id/src/dst/chcr = %d/%pad/%pad/%08x\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 502) 		dmapp->dmapp_id, &dma->src_addr, &dma->dst_addr, dmapp->chcr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 503) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 504) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 505) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 506) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 507) static struct rsnd_mod_ops rsnd_dmapp_ops = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 508) 	.name		= "audmac-pp",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 509) 	.start		= rsnd_dmapp_start,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 510) 	.stop		= rsnd_dmapp_stop,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 511) 	.quit		= rsnd_dmapp_stop,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 512) 	.get_status	= rsnd_mod_get_status,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 513) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 514) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 515) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 516)  *		Common DMAC Interface
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 517)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 518) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 519) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 520)  *	DMA read/write register offset
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 521)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 522)  *	RSND_xxx_I_N	for Audio DMAC input
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 523)  *	RSND_xxx_O_N	for Audio DMAC output
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 524)  *	RSND_xxx_I_P	for Audio DMAC peri peri input
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 525)  *	RSND_xxx_O_P	for Audio DMAC peri peri output
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 526)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 527)  *	ex) R-Car H2 case
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 528)  *	      mod        / DMAC in    / DMAC out   / DMAC PP in / DMAC pp out
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 529)  *	SSI : 0xec541000 / 0xec241008 / 0xec24100c
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 530)  *	SSIU: 0xec541000 / 0xec100000 / 0xec100000 / 0xec400000 / 0xec400000
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 531)  *	SCU : 0xec500000 / 0xec000000 / 0xec004000 / 0xec300000 / 0xec304000
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 532)  *	CMD : 0xec500000 /            / 0xec008000                0xec308000
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 533)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 534) #define RDMA_SSI_I_N(addr, i)	(addr ##_reg - 0x00300000 + (0x40 * i) + 0x8)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 535) #define RDMA_SSI_O_N(addr, i)	(addr ##_reg - 0x00300000 + (0x40 * i) + 0xc)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 536) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 537) #define RDMA_SSIU_I_N(addr, i, j) (addr ##_reg - 0x00441000 + (0x1000 * (i)) + (((j) / 4) * 0xA000) + (((j) % 4) * 0x400) - (0x4000 * ((i) / 9) * ((j) / 4)))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 538) #define RDMA_SSIU_O_N(addr, i, j) RDMA_SSIU_I_N(addr, i, j)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 539) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 540) #define RDMA_SSIU_I_P(addr, i, j) (addr ##_reg - 0x00141000 + (0x1000 * (i)) + (((j) / 4) * 0xA000) + (((j) % 4) * 0x400) - (0x4000 * ((i) / 9) * ((j) / 4)))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 541) #define RDMA_SSIU_O_P(addr, i, j) RDMA_SSIU_I_P(addr, i, j)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 542) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 543) #define RDMA_SRC_I_N(addr, i)	(addr ##_reg - 0x00500000 + (0x400 * i))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 544) #define RDMA_SRC_O_N(addr, i)	(addr ##_reg - 0x004fc000 + (0x400 * i))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 545) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 546) #define RDMA_SRC_I_P(addr, i)	(addr ##_reg - 0x00200000 + (0x400 * i))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 547) #define RDMA_SRC_O_P(addr, i)	(addr ##_reg - 0x001fc000 + (0x400 * i))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 548) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 549) #define RDMA_CMD_O_N(addr, i)	(addr ##_reg - 0x004f8000 + (0x400 * i))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 550) #define RDMA_CMD_O_P(addr, i)	(addr ##_reg - 0x001f8000 + (0x400 * i))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 551) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 552) static dma_addr_t
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 553) rsnd_gen2_dma_addr(struct rsnd_dai_stream *io,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 554) 		   struct rsnd_mod *mod,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 555) 		   int is_play, int is_from)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 556) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 557) 	struct rsnd_priv *priv = rsnd_io_to_priv(io);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 558) 	struct device *dev = rsnd_priv_to_dev(priv);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 559) 	phys_addr_t ssi_reg = rsnd_gen_get_phy_addr(priv, RSND_GEN2_SSI);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 560) 	phys_addr_t src_reg = rsnd_gen_get_phy_addr(priv, RSND_GEN2_SCU);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 561) 	int is_ssi = !!(rsnd_io_to_mod_ssi(io) == mod) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 562) 		     !!(rsnd_io_to_mod_ssiu(io) == mod);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 563) 	int use_src = !!rsnd_io_to_mod_src(io);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 564) 	int use_cmd = !!rsnd_io_to_mod_dvc(io) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 565) 		      !!rsnd_io_to_mod_mix(io) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 566) 		      !!rsnd_io_to_mod_ctu(io);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 567) 	int id = rsnd_mod_id(mod);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 568) 	int busif = rsnd_mod_id_sub(rsnd_io_to_mod_ssiu(io));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 569) 	struct dma_addr {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 570) 		dma_addr_t out_addr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 571) 		dma_addr_t in_addr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 572) 	} dma_addrs[3][2][3] = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 573) 		/* SRC */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 574) 		/* Capture */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 575) 		{{{ 0,				0 },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 576) 		  { RDMA_SRC_O_N(src, id),	RDMA_SRC_I_P(src, id) },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 577) 		  { RDMA_CMD_O_N(src, id),	RDMA_SRC_I_P(src, id) } },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 578) 		 /* Playback */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 579) 		 {{ 0,				0, },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 580) 		  { RDMA_SRC_O_P(src, id),	RDMA_SRC_I_N(src, id) },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 581) 		  { RDMA_CMD_O_P(src, id),	RDMA_SRC_I_N(src, id) } }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 582) 		},
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 583) 		/* SSI */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 584) 		/* Capture */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 585) 		{{{ RDMA_SSI_O_N(ssi, id),		0 },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 586) 		  { RDMA_SSIU_O_P(ssi, id, busif),	0 },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 587) 		  { RDMA_SSIU_O_P(ssi, id, busif),	0 } },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 588) 		 /* Playback */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 589) 		 {{ 0,			RDMA_SSI_I_N(ssi, id) },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 590) 		  { 0,			RDMA_SSIU_I_P(ssi, id, busif) },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 591) 		  { 0,			RDMA_SSIU_I_P(ssi, id, busif) } }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 592) 		},
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 593) 		/* SSIU */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 594) 		/* Capture */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 595) 		{{{ RDMA_SSIU_O_N(ssi, id, busif),	0 },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 596) 		  { RDMA_SSIU_O_P(ssi, id, busif),	0 },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 597) 		  { RDMA_SSIU_O_P(ssi, id, busif),	0 } },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 598) 		 /* Playback */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 599) 		 {{ 0,			RDMA_SSIU_I_N(ssi, id, busif) },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 600) 		  { 0,			RDMA_SSIU_I_P(ssi, id, busif) },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 601) 		  { 0,			RDMA_SSIU_I_P(ssi, id, busif) } } },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 602) 	};
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 603) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 604) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 605) 	 * FIXME
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 606) 	 *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 607) 	 * We can't support SSI9-4/5/6/7, because its address is
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 608) 	 * out of calculation rule
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 609) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 610) 	if ((id == 9) && (busif >= 4))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 611) 		dev_err(dev, "This driver doesn't support SSI%d-%d, so far",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 612) 			id, busif);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 613) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 614) 	/* it shouldn't happen */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 615) 	if (use_cmd && !use_src)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 616) 		dev_err(dev, "DVC is selected without SRC\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 617) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 618) 	/* use SSIU or SSI ? */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 619) 	if (is_ssi && rsnd_ssi_use_busif(io))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 620) 		is_ssi++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 621) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 622) 	return (is_from) ?
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 623) 		dma_addrs[is_ssi][is_play][use_src + use_cmd].out_addr :
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 624) 		dma_addrs[is_ssi][is_play][use_src + use_cmd].in_addr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 625) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 626) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 627) static dma_addr_t rsnd_dma_addr(struct rsnd_dai_stream *io,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 628) 				struct rsnd_mod *mod,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 629) 				int is_play, int is_from)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 630) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 631) 	struct rsnd_priv *priv = rsnd_io_to_priv(io);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 632) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 633) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 634) 	 * gen1 uses default DMA addr
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 635) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 636) 	if (rsnd_is_gen1(priv))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 637) 		return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 638) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 639) 	if (!mod)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 640) 		return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 641) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 642) 	return rsnd_gen2_dma_addr(io, mod, is_play, is_from);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 643) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 644) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 645) #define MOD_MAX (RSND_MOD_MAX + 1) /* +Memory */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 646) static void rsnd_dma_of_path(struct rsnd_mod *this,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 647) 			     struct rsnd_dai_stream *io,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 648) 			     int is_play,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 649) 			     struct rsnd_mod **mod_from,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 650) 			     struct rsnd_mod **mod_to)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 651) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 652) 	struct rsnd_mod *ssi;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 653) 	struct rsnd_mod *src = rsnd_io_to_mod_src(io);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 654) 	struct rsnd_mod *ctu = rsnd_io_to_mod_ctu(io);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 655) 	struct rsnd_mod *mix = rsnd_io_to_mod_mix(io);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 656) 	struct rsnd_mod *dvc = rsnd_io_to_mod_dvc(io);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 657) 	struct rsnd_mod *mod[MOD_MAX];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 658) 	struct rsnd_mod *mod_start, *mod_end;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 659) 	struct rsnd_priv *priv = rsnd_mod_to_priv(this);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 660) 	struct device *dev = rsnd_priv_to_dev(priv);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 661) 	int nr, i, idx;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 662) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 663) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 664) 	 * It should use "rcar_sound,ssiu" on DT.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 665) 	 * But, we need to keep compatibility for old version.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 666) 	 *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 667) 	 * If it has "rcar_sound.ssiu", it will be used.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 668) 	 * If not, "rcar_sound.ssi" will be used.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 669) 	 * see
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 670) 	 *	rsnd_ssiu_dma_req()
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 671) 	 *	rsnd_ssi_dma_req()
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 672) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 673) 	if (rsnd_ssiu_of_node(priv)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 674) 		struct rsnd_mod *ssiu = rsnd_io_to_mod_ssiu(io);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 675) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 676) 		/* use SSIU */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 677) 		ssi = ssiu;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 678) 		if (this == rsnd_io_to_mod_ssi(io))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 679) 			this = ssiu;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 680) 	} else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 681) 		/* keep compatible, use SSI */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 682) 		ssi = rsnd_io_to_mod_ssi(io);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 683) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 684) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 685) 	if (!ssi)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 686) 		return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 687) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 688) 	nr = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 689) 	for (i = 0; i < MOD_MAX; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 690) 		mod[i] = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 691) 		nr += !!rsnd_io_to_mod(io, i);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 692) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 693) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 694) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 695) 	 * [S] -*-> [E]
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 696) 	 * [S] -*-> SRC -o-> [E]
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 697) 	 * [S] -*-> SRC -> DVC -o-> [E]
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 698) 	 * [S] -*-> SRC -> CTU -> MIX -> DVC -o-> [E]
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 699) 	 *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 700) 	 * playback	[S] = mem
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 701) 	 *		[E] = SSI
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 702) 	 *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 703) 	 * capture	[S] = SSI
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 704) 	 *		[E] = mem
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 705) 	 *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 706) 	 * -*->		Audio DMAC
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 707) 	 * -o->		Audio DMAC peri peri
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 708) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 709) 	mod_start	= (is_play) ? NULL : ssi;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 710) 	mod_end		= (is_play) ? ssi  : NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 711) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 712) 	idx = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 713) 	mod[idx++] = mod_start;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 714) 	for (i = 1; i < nr; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 715) 		if (src) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 716) 			mod[idx++] = src;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 717) 			src = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 718) 		} else if (ctu) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 719) 			mod[idx++] = ctu;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 720) 			ctu = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 721) 		} else if (mix) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 722) 			mod[idx++] = mix;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 723) 			mix = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 724) 		} else if (dvc) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 725) 			mod[idx++] = dvc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 726) 			dvc = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 727) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 728) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 729) 	mod[idx] = mod_end;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 730) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 731) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 732) 	 *		| SSI | SRC |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 733) 	 * -------------+-----+-----+
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 734) 	 *  is_play	|  o  |  *  |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 735) 	 * !is_play	|  *  |  o  |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 736) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 737) 	if ((this == ssi) == (is_play)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 738) 		*mod_from	= mod[idx - 1];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 739) 		*mod_to		= mod[idx];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 740) 	} else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 741) 		*mod_from	= mod[0];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 742) 		*mod_to		= mod[1];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 743) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 744) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 745) 	dev_dbg(dev, "module connection (this is %s)\n", rsnd_mod_name(this));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 746) 	for (i = 0; i <= idx; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 747) 		dev_dbg(dev, "  %s%s\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 748) 			rsnd_mod_name(mod[i] ? mod[i] : &mem),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 749) 			(mod[i] == *mod_from) ? " from" :
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 750) 			(mod[i] == *mod_to)   ? " to" : "");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 751) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 752) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 753) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 754) static int rsnd_dma_alloc(struct rsnd_dai_stream *io, struct rsnd_mod *mod,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 755) 			  struct rsnd_mod **dma_mod)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 756) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 757) 	struct rsnd_mod *mod_from = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 758) 	struct rsnd_mod *mod_to = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 759) 	struct rsnd_priv *priv = rsnd_io_to_priv(io);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 760) 	struct rsnd_dma_ctrl *dmac = rsnd_priv_to_dmac(priv);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 761) 	struct device *dev = rsnd_priv_to_dev(priv);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 762) 	struct rsnd_dma *dma;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 763) 	struct rsnd_mod_ops *ops;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 764) 	enum rsnd_mod_type type;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 765) 	int (*attach)(struct rsnd_dai_stream *io, struct rsnd_dma *dma,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 766) 		      struct rsnd_mod *mod_from, struct rsnd_mod *mod_to);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 767) 	int is_play = rsnd_io_is_play(io);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 768) 	int ret, dma_id;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 769) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 770) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 771) 	 * DMA failed. try to PIO mode
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 772) 	 * see
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 773) 	 *	rsnd_ssi_fallback()
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 774) 	 *	rsnd_rdai_continuance_probe()
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 775) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 776) 	if (!dmac)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 777) 		return -EAGAIN;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 778) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 779) 	rsnd_dma_of_path(mod, io, is_play, &mod_from, &mod_to);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 780) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 781) 	/* for Gen2 or later */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 782) 	if (mod_from && mod_to) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 783) 		ops	= &rsnd_dmapp_ops;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 784) 		attach	= rsnd_dmapp_attach;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 785) 		dma_id	= dmac->dmapp_num;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 786) 		type	= RSND_MOD_AUDMAPP;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 787) 	} else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 788) 		ops	= &rsnd_dmaen_ops;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 789) 		attach	= rsnd_dmaen_attach;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 790) 		dma_id	= dmac->dmaen_num;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 791) 		type	= RSND_MOD_AUDMA;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 792) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 793) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 794) 	/* for Gen1, overwrite */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 795) 	if (rsnd_is_gen1(priv)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 796) 		ops	= &rsnd_dmaen_ops;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 797) 		attach	= rsnd_dmaen_attach;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 798) 		dma_id	= dmac->dmaen_num;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 799) 		type	= RSND_MOD_AUDMA;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 800) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 801) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 802) 	dma = devm_kzalloc(dev, sizeof(*dma), GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 803) 	if (!dma)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 804) 		return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 805) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 806) 	*dma_mod = rsnd_mod_get(dma);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 807) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 808) 	ret = rsnd_mod_init(priv, *dma_mod, ops, NULL,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 809) 			    type, dma_id);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 810) 	if (ret < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 811) 		return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 812) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 813) 	dev_dbg(dev, "%s %s -> %s\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 814) 		rsnd_mod_name(*dma_mod),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 815) 		rsnd_mod_name(mod_from ? mod_from : &mem),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 816) 		rsnd_mod_name(mod_to   ? mod_to   : &mem));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 817) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 818) 	ret = attach(io, dma, mod_from, mod_to);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 819) 	if (ret < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 820) 		return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 821) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 822) 	dma->src_addr = rsnd_dma_addr(io, mod_from, is_play, 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 823) 	dma->dst_addr = rsnd_dma_addr(io, mod_to,   is_play, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 824) 	dma->mod_from = mod_from;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 825) 	dma->mod_to   = mod_to;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 826) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 827) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 828) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 829) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 830) int rsnd_dma_attach(struct rsnd_dai_stream *io, struct rsnd_mod *mod,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 831) 		    struct rsnd_mod **dma_mod)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 832) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 833) 	if (!(*dma_mod)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 834) 		int ret = rsnd_dma_alloc(io, mod, dma_mod);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 835) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 836) 		if (ret < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 837) 			return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 838) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 839) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 840) 	return rsnd_dai_connect(*dma_mod, io, (*dma_mod)->type);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 841) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 842) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 843) int rsnd_dma_probe(struct rsnd_priv *priv)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 844) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 845) 	struct platform_device *pdev = rsnd_priv_to_pdev(priv);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 846) 	struct device *dev = rsnd_priv_to_dev(priv);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 847) 	struct rsnd_dma_ctrl *dmac;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 848) 	struct resource *res;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 849) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 850) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 851) 	 * for Gen1
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 852) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 853) 	if (rsnd_is_gen1(priv))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 854) 		return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 855) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 856) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 857) 	 * for Gen2 or later
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 858) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 859) 	res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "audmapp");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 860) 	dmac = devm_kzalloc(dev, sizeof(*dmac), GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 861) 	if (!dmac || !res) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 862) 		dev_err(dev, "dma allocate failed\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 863) 		return 0; /* it will be PIO mode */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 864) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 865) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 866) 	dmac->dmapp_num = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 867) 	dmac->base = devm_ioremap_resource(dev, res);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 868) 	if (IS_ERR(dmac->base))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 869) 		return PTR_ERR(dmac->base);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 870) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 871) 	priv->dma = dmac;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 872) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 873) 	/* dummy mem mod for debug */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 874) 	return rsnd_mod_init(NULL, &mem, &mem_ops, NULL, 0, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 875) }