Orange Pi 5 kernel

Deprecated Linux kernel 5.10.110 for OrangePi 5/5B/5+ boards

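The file below is the Spreadtrum ASoC PCM DMA platform driver carried in this tree (it appears to be sound/soc/sprd/sprd-pcm-dma.c, judging by its includes and driver name).
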
// SPDX-License-Identifier: GPL-2.0
// Copyright (C) 2019 Spreadtrum Communications Inc.

#include <linux/dma-mapping.h>
#include <linux/dmaengine.h>
#include <linux/dma/sprd-dma.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/of_reserved_mem.h>
#include <linux/platform_device.h>
#include <sound/pcm.h>
#include <sound/pcm_params.h>
#include <sound/soc.h>

#include "sprd-pcm-dma.h"

#define SPRD_PCM_DMA_LINKLIST_SIZE	64
#define SPRD_PCM_DMA_BRUST_LEN		640

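/*
 * Per-channel DMA bookkeeping: the dmaengine channel, the prepared
 * descriptor and its cookie, the coherent buffer (virt/phys) that holds the
 * hardware link-list nodes, and the previous pointer sample used when
 * computing the current position.
 */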
struct sprd_pcm_dma_data {
	struct dma_chan *chan;
	struct dma_async_tx_descriptor *desc;
	dma_cookie_t cookie;
	dma_addr_t phys;
	void *virt;
	int pre_pointer;
};

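/*
 * Per-substream runtime state: the owning substream, the DMA parameters
 * handed over by the CPU DAI, one sprd_pcm_dma_data per hardware channel,
 * and the byte offset separating each channel's slice of the DMA buffer.
 */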
struct sprd_pcm_dma_private {
	struct snd_pcm_substream *substream;
	struct sprd_pcm_dma_params *params;
	struct sprd_pcm_dma_data data[SPRD_PCM_CHANNEL_MAX];
	int hw_chan;
	int dma_addr_offset;
};

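/*
 * Capabilities advertised to ALSA: interleaved, mmap-able streams with
 * pause, resume and no-period-wakeup support, S16_LE/S24_LE samples and a
 * 64 KiB buffer. periods_max is sized so that one page worth of link-list
 * nodes (SPRD_PCM_DMA_LINKLIST_SIZE bytes each) covers every period.
 */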
static const struct snd_pcm_hardware sprd_pcm_hardware = {
	.info = SNDRV_PCM_INFO_MMAP | SNDRV_PCM_INFO_MMAP_VALID |
		SNDRV_PCM_INFO_INTERLEAVED | SNDRV_PCM_INFO_PAUSE |
		SNDRV_PCM_INFO_RESUME | SNDRV_PCM_INFO_NO_PERIOD_WAKEUP,
	.formats = SNDRV_PCM_FMTBIT_S16_LE | SNDRV_PCM_FMTBIT_S24_LE,
	.period_bytes_min = 1,
	.period_bytes_max = 64 * 1024,
	.periods_min = 1,
	.periods_max = PAGE_SIZE / SPRD_PCM_DMA_LINKLIST_SIZE,
	.buffer_bytes_max = 64 * 1024,
};

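/*
 * Open callback: apply the hardware constraints (period and buffer sizes
 * must be multiples of the DMA burst length, the period count must be an
 * integer) and allocate, for each hardware channel, a coherent buffer of
 * periods_max * SPRD_PCM_DMA_LINKLIST_SIZE bytes to hold the DMA engine
 * link-list nodes.
 */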
static int sprd_pcm_open(struct snd_soc_component *component,
			 struct snd_pcm_substream *substream)
{
	struct snd_pcm_runtime *runtime = substream->runtime;
	struct device *dev = component->dev;
	struct sprd_pcm_dma_private *dma_private;
	int hw_chan = SPRD_PCM_CHANNEL_MAX;
	int size, ret, i;

	snd_soc_set_runtime_hwparams(substream, &sprd_pcm_hardware);

	ret = snd_pcm_hw_constraint_step(runtime, 0,
					 SNDRV_PCM_HW_PARAM_PERIOD_BYTES,
					 SPRD_PCM_DMA_BRUST_LEN);
	if (ret < 0)
		return ret;

	ret = snd_pcm_hw_constraint_step(runtime, 0,
					 SNDRV_PCM_HW_PARAM_BUFFER_BYTES,
					 SPRD_PCM_DMA_BRUST_LEN);
	if (ret < 0)
		return ret;

	ret = snd_pcm_hw_constraint_integer(runtime,
					    SNDRV_PCM_HW_PARAM_PERIODS);
	if (ret < 0)
		return ret;

	dma_private = devm_kzalloc(dev, sizeof(*dma_private), GFP_KERNEL);
	if (!dma_private)
		return -ENOMEM;

	size = runtime->hw.periods_max * SPRD_PCM_DMA_LINKLIST_SIZE;

	for (i = 0; i < hw_chan; i++) {
		struct sprd_pcm_dma_data *data = &dma_private->data[i];

		data->virt = dmam_alloc_coherent(dev, size, &data->phys,
						 GFP_KERNEL);
		if (!data->virt) {
			ret = -ENOMEM;
			goto error;
		}
	}

	dma_private->hw_chan = hw_chan;
	runtime->private_data = dma_private;
	dma_private->substream = substream;

	return 0;

error:
	for (i = 0; i < hw_chan; i++) {
		struct sprd_pcm_dma_data *data = &dma_private->data[i];

		if (data->virt)
			dmam_free_coherent(dev, size, data->virt, data->phys);
	}

	devm_kfree(dev, dma_private);
	return ret;
}

static int sprd_pcm_close(struct snd_soc_component *component,
			  struct snd_pcm_substream *substream)
{
	struct snd_pcm_runtime *runtime = substream->runtime;
	struct sprd_pcm_dma_private *dma_private = runtime->private_data;
	struct device *dev = component->dev;
	int size = runtime->hw.periods_max * SPRD_PCM_DMA_LINKLIST_SIZE;
	int i;

	for (i = 0; i < dma_private->hw_chan; i++) {
		struct sprd_pcm_dma_data *data = &dma_private->data[i];

		dmam_free_coherent(dev, size, data->virt, data->phys);
	}

	devm_kfree(dev, dma_private);

	return 0;
}

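/* DMA transfer-done callback: tell ALSA that another period has elapsed. */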
static void sprd_pcm_dma_complete(void *data)
{
	struct sprd_pcm_dma_private *dma_private = data;
	struct snd_pcm_substream *substream = dma_private->substream;

	snd_pcm_period_elapsed(substream);
}

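/* Release every DMA channel that was requested for this substream. */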
static void sprd_pcm_release_dma_channel(struct snd_pcm_substream *substream)
{
	struct snd_pcm_runtime *runtime = substream->runtime;
	struct sprd_pcm_dma_private *dma_private = runtime->private_data;
	int i;

	for (i = 0; i < SPRD_PCM_CHANNEL_MAX; i++) {
		struct sprd_pcm_dma_data *data = &dma_private->data[i];

		if (data->chan) {
			dma_release_channel(data->chan);
			data->chan = NULL;
		}
	}
}

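/*
 * Request one dmaengine slave channel per audio channel, using the channel
 * names supplied by the CPU DAI; on any failure, release whatever was
 * already acquired.
 */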
static int sprd_pcm_request_dma_channel(struct snd_soc_component *component,
					struct snd_pcm_substream *substream,
					int channels)
{
	struct snd_pcm_runtime *runtime = substream->runtime;
	struct sprd_pcm_dma_private *dma_private = runtime->private_data;
	struct device *dev = component->dev;
	struct sprd_pcm_dma_params *dma_params = dma_private->params;
	int i;

	if (channels > SPRD_PCM_CHANNEL_MAX) {
		dev_err(dev, "invalid dma channel number:%d\n", channels);
		return -EINVAL;
	}

	for (i = 0; i < channels; i++) {
		struct sprd_pcm_dma_data *data = &dma_private->data[i];

		data->chan = dma_request_slave_channel(dev,
						       dma_params->chan_name[i]);
		if (!data->chan) {
			dev_err(dev, "failed to request dma channel:%s\n",
				dma_params->chan_name[i]);
			sprd_pcm_release_dma_channel(substream);
			return -ENODEV;
		}
	}

	return 0;
}

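/*
 * hw_params callback: split the preallocated DMA buffer evenly between the
 * audio channels, build a temporary scatterlist with one entry per period
 * (each period / channels bytes long), configure every slave channel
 * (burst length, data width, device/memory addresses) and prepare a
 * link-list transfer through device_prep_slave_sg(), passing the
 * per-channel link-list buffer in the sprd_dma_linklist argument. Unless
 * the stream runs with no_period_wakeup, the descriptor callback reports
 * each completed period to ALSA.
 */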
static int sprd_pcm_hw_params(struct snd_soc_component *component,
			      struct snd_pcm_substream *substream,
			      struct snd_pcm_hw_params *params)
{
	struct snd_pcm_runtime *runtime = substream->runtime;
	struct sprd_pcm_dma_private *dma_private = runtime->private_data;
	struct snd_soc_pcm_runtime *rtd = asoc_substream_to_rtd(substream);
	struct sprd_pcm_dma_params *dma_params;
	size_t totsize = params_buffer_bytes(params);
	size_t period = params_period_bytes(params);
	int channels = params_channels(params);
	int is_playback = substream->stream == SNDRV_PCM_STREAM_PLAYBACK;
	struct scatterlist *sg;
	unsigned long flags;
	int ret, i, j, sg_num;

	dma_params = snd_soc_dai_get_dma_data(asoc_rtd_to_cpu(rtd, 0), substream);
	if (!dma_params) {
		dev_warn(component->dev, "no dma parameters setting\n");
		dma_private->params = NULL;
		snd_pcm_set_runtime_buffer(substream, &substream->dma_buffer);
		runtime->dma_bytes = totsize;
		return 0;
	}

	if (!dma_private->params) {
		dma_private->params = dma_params;
		ret = sprd_pcm_request_dma_channel(component,
						   substream, channels);
		if (ret)
			return ret;
	}

	snd_pcm_set_runtime_buffer(substream, &substream->dma_buffer);

	runtime->dma_bytes = totsize;
	sg_num = totsize / period;
	dma_private->dma_addr_offset = totsize / channels;

	sg = devm_kcalloc(component->dev, sg_num, sizeof(*sg), GFP_KERNEL);
	if (!sg) {
		ret = -ENOMEM;
		goto sg_err;
	}

	for (i = 0; i < channels; i++) {
		struct sprd_pcm_dma_data *data = &dma_private->data[i];
		struct dma_chan *chan = data->chan;
		struct dma_slave_config config = { };
		struct sprd_dma_linklist link = { };
		enum dma_transfer_direction dir;
		struct scatterlist *sgt = sg;

		config.src_maxburst = dma_params->fragment_len[i];
		config.src_addr_width = dma_params->datawidth[i];
		config.dst_addr_width = dma_params->datawidth[i];
		if (is_playback) {
			config.src_addr = runtime->dma_addr +
				i * dma_private->dma_addr_offset;
			config.dst_addr = dma_params->dev_phys[i];
			dir = DMA_MEM_TO_DEV;
		} else {
			config.src_addr = dma_params->dev_phys[i];
			config.dst_addr = runtime->dma_addr +
				i * dma_private->dma_addr_offset;
			dir = DMA_DEV_TO_MEM;
		}

		sg_init_table(sgt, sg_num);
		for (j = 0; j < sg_num; j++, sgt++) {
			u32 sg_len = period / channels;

			sg_dma_len(sgt) = sg_len;
			sg_dma_address(sgt) = runtime->dma_addr +
				i * dma_private->dma_addr_offset + sg_len * j;
		}

		/*
		 * Configure the link-list address for the DMA engine link-list
		 * mode.
		 */
		link.virt_addr = (unsigned long)data->virt;
		link.phy_addr = data->phys;

		ret = dmaengine_slave_config(chan, &config);
		if (ret) {
			dev_err(component->dev,
				"failed to set slave configuration: %d\n", ret);
			goto config_err;
		}

		/*
		 * We configure the DMA request mode, interrupt mode, channel
		 * mode and channel trigger mode by the flags.
		 */
		flags = SPRD_DMA_FLAGS(SPRD_DMA_CHN_MODE_NONE, SPRD_DMA_NO_TRG,
				       SPRD_DMA_FRAG_REQ, SPRD_DMA_TRANS_INT);
		data->desc = chan->device->device_prep_slave_sg(chan, sg,
								sg_num, dir,
								flags, &link);
		if (!data->desc) {
			dev_err(component->dev, "failed to prepare slave sg\n");
			ret = -ENOMEM;
			goto config_err;
		}

		if (!runtime->no_period_wakeup) {
			data->desc->callback = sprd_pcm_dma_complete;
			data->desc->callback_param = dma_private;
		}
	}

	devm_kfree(component->dev, sg);

	return 0;

config_err:
	devm_kfree(component->dev, sg);
sg_err:
	sprd_pcm_release_dma_channel(substream);
	return ret;
}

static int sprd_pcm_hw_free(struct snd_soc_component *component,
			    struct snd_pcm_substream *substream)
{
	snd_pcm_set_runtime_buffer(substream, NULL);
	sprd_pcm_release_dma_channel(substream);

	return 0;
}

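/*
 * Trigger callback: START submits the prepared descriptors and kicks the
 * DMA engine, RESUME/PAUSE_RELEASE resume the channels, STOP terminates
 * them asynchronously, and SUSPEND/PAUSE_PUSH pause them.
 */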
static int sprd_pcm_trigger(struct snd_soc_component *component,
			    struct snd_pcm_substream *substream, int cmd)
{
	struct sprd_pcm_dma_private *dma_private =
		substream->runtime->private_data;
	int ret = 0, i;

	switch (cmd) {
	case SNDRV_PCM_TRIGGER_START:
		for (i = 0; i < dma_private->hw_chan; i++) {
			struct sprd_pcm_dma_data *data = &dma_private->data[i];

			if (!data->desc)
				continue;

			data->cookie = dmaengine_submit(data->desc);
			ret = dma_submit_error(data->cookie);
			if (ret) {
				dev_err(component->dev,
					"failed to submit dma request: %d\n",
					ret);
				return ret;
			}

			dma_async_issue_pending(data->chan);
		}

		break;
	case SNDRV_PCM_TRIGGER_RESUME:
	case SNDRV_PCM_TRIGGER_PAUSE_RELEASE:
		for (i = 0; i < dma_private->hw_chan; i++) {
			struct sprd_pcm_dma_data *data = &dma_private->data[i];

			if (data->chan)
				dmaengine_resume(data->chan);
		}

		break;
	case SNDRV_PCM_TRIGGER_STOP:
		for (i = 0; i < dma_private->hw_chan; i++) {
			struct sprd_pcm_dma_data *data = &dma_private->data[i];

			if (data->chan)
				dmaengine_terminate_async(data->chan);
		}

		break;
	case SNDRV_PCM_TRIGGER_SUSPEND:
	case SNDRV_PCM_TRIGGER_PAUSE_PUSH:
		for (i = 0; i < dma_private->hw_chan; i++) {
			struct sprd_pcm_dma_data *data = &dma_private->data[i];

			if (data->chan)
				dmaengine_pause(data->chan);
		}

		break;
	default:
		ret = -EINVAL;
	}

	return ret;
}

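/*
 * Pointer callback: query each DMA channel for its current transfer address
 * (reported in the residue field), convert it to an offset within that
 * channel's slice of the buffer, and combine the per-channel offsets,
 * accounting for wrap-around, into one interleaved byte position that is
 * then translated to frames.
 */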
static snd_pcm_uframes_t sprd_pcm_pointer(struct snd_soc_component *component,
					  struct snd_pcm_substream *substream)
{
	struct snd_pcm_runtime *runtime = substream->runtime;
	struct sprd_pcm_dma_private *dma_private = runtime->private_data;
	int pointer[SPRD_PCM_CHANNEL_MAX];
	int bytes_of_pointer = 0, sel_max = 0, i;
	snd_pcm_uframes_t x;
	struct dma_tx_state state;
	enum dma_status status;

	for (i = 0; i < dma_private->hw_chan; i++) {
		struct sprd_pcm_dma_data *data = &dma_private->data[i];

		if (!data->chan)
			continue;

		status = dmaengine_tx_status(data->chan, data->cookie, &state);
		if (status == DMA_ERROR) {
			dev_err(component->dev,
				"failed to get dma channel %d status\n", i);
			return 0;
		}

		/*
		 * The DMA engine only reports the current transfer address,
		 * so we need to convert it into the current pointer.
		 */
		pointer[i] = state.residue - runtime->dma_addr -
			i * dma_private->dma_addr_offset;

		if (i == 0) {
			bytes_of_pointer = pointer[i];
			sel_max = pointer[i] < data->pre_pointer ? 1 : 0;
		} else {
			sel_max ^= pointer[i] < data->pre_pointer ? 1 : 0;

			if (sel_max)
				bytes_of_pointer =
					max(pointer[i], pointer[i - 1]) << 1;
			else
				bytes_of_pointer =
					min(pointer[i], pointer[i - 1]) << 1;
		}

		data->pre_pointer = pointer[i];
	}

	x = bytes_to_frames(runtime, bytes_of_pointer);
	if (x == runtime->buffer_size)
		x = 0;

	return x;
}

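/* Map the DMA buffer into userspace with write-combining attributes. */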
static int sprd_pcm_mmap(struct snd_soc_component *component,
			 struct snd_pcm_substream *substream,
			 struct vm_area_struct *vma)
{
	struct snd_pcm_runtime *runtime = substream->runtime;

	vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot);
	return remap_pfn_range(vma, vma->vm_start,
			       runtime->dma_addr >> PAGE_SHIFT,
			       vma->vm_end - vma->vm_start,
			       vma->vm_page_prot);
}

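/*
 * PCM construction: force a 32-bit DMA mask on the card device and
 * preallocate a buffer_bytes_max (64 KiB) DMA buffer for the playback and
 * capture substreams; if the capture allocation fails, the playback buffer
 * is freed again.
 */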
static int sprd_pcm_new(struct snd_soc_component *component,
			struct snd_soc_pcm_runtime *rtd)
{
	struct snd_card *card = rtd->card->snd_card;
	struct snd_pcm *pcm = rtd->pcm;
	struct snd_pcm_substream *substream;
	int ret;

	ret = dma_coerce_mask_and_coherent(card->dev, DMA_BIT_MASK(32));
	if (ret)
		return ret;

	substream = pcm->streams[SNDRV_PCM_STREAM_PLAYBACK].substream;
	if (substream) {
		ret = snd_dma_alloc_pages(SNDRV_DMA_TYPE_DEV, card->dev,
					  sprd_pcm_hardware.buffer_bytes_max,
					  &substream->dma_buffer);
		if (ret) {
			dev_err(card->dev,
				"can't alloc playback dma buffer: %d\n", ret);
			return ret;
		}
	}

	substream = pcm->streams[SNDRV_PCM_STREAM_CAPTURE].substream;
	if (substream) {
		ret = snd_dma_alloc_pages(SNDRV_DMA_TYPE_DEV, card->dev,
					  sprd_pcm_hardware.buffer_bytes_max,
					  &substream->dma_buffer);
		if (ret) {
			dev_err(card->dev,
				"can't alloc capture dma buffer: %d\n", ret);
			snd_dma_free_pages(&pcm->streams[SNDRV_PCM_STREAM_PLAYBACK].substream->dma_buffer);
			return ret;
		}
	}

	return 0;
}

static void sprd_pcm_free(struct snd_soc_component *component,
			  struct snd_pcm *pcm)
{
	struct snd_pcm_substream *substream;
	int i;

	for (i = 0; i < ARRAY_SIZE(pcm->streams); i++) {
		substream = pcm->streams[i].substream;
		if (substream) {
			snd_dma_free_pages(&substream->dma_buffer);
			substream->dma_buffer.area = NULL;
			substream->dma_buffer.addr = 0;
		}
	}
}

static const struct snd_soc_component_driver sprd_soc_component = {
	.name		= DRV_NAME,
	.open		= sprd_pcm_open,
	.close		= sprd_pcm_close,
	.hw_params	= sprd_pcm_hw_params,
	.hw_free	= sprd_pcm_hw_free,
	.trigger	= sprd_pcm_trigger,
	.pointer	= sprd_pcm_pointer,
	.mmap		= sprd_pcm_mmap,
	.pcm_construct	= sprd_pcm_new,
	.pcm_destruct	= sprd_pcm_free,
	.compress_ops	= &sprd_platform_compress_ops,
};

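/*
 * Probe: try to attach the reserved DMA memory region (index 0) described
 * in the device tree, warn but continue if none is present, and register
 * the ASoC platform component.
 */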
static int sprd_soc_platform_probe(struct platform_device *pdev)
{
	struct device_node *np = pdev->dev.of_node;
	int ret;

	ret = of_reserved_mem_device_init_by_idx(&pdev->dev, np, 0);
	if (ret)
		dev_warn(&pdev->dev,
			 "no reserved DMA memory for audio platform device\n");

	ret = devm_snd_soc_register_component(&pdev->dev, &sprd_soc_component,
					      NULL, 0);
	if (ret)
		dev_err(&pdev->dev, "could not register platform:%d\n", ret);

	return ret;
}

static const struct of_device_id sprd_pcm_of_match[] = {
	{ .compatible = "sprd,pcm-platform", },
	{ },
};
MODULE_DEVICE_TABLE(of, sprd_pcm_of_match);

static struct platform_driver sprd_pcm_driver = {
	.driver = {
		.name = "sprd-pcm-audio",
		.of_match_table = sprd_pcm_of_match,
	},

	.probe = sprd_soc_platform_probe,
};

module_platform_driver(sprd_pcm_driver);

MODULE_DESCRIPTION("Spreadtrum ASoC PCM DMA");
MODULE_LICENSE("GPL v2");
MODULE_ALIAS("platform:sprd-audio");