^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1) // SPDX-License-Identifier: GPL-2.0+
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2) //
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3) // Copyright (C) 2013, Analog Devices Inc.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4) // Author: Lars-Peter Clausen <lars@metafoo.de>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6) #include <linux/module.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7) #include <linux/init.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8) #include <linux/dmaengine.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9) #include <linux/slab.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10) #include <sound/pcm.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11) #include <sound/pcm_params.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12) #include <sound/soc.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13) #include <linux/dma-mapping.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14) #include <linux/of.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 16) #include <sound/dmaengine_pcm.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 17)
/*
 * Default per-stream preallocated DMA buffer size, overridable at module load
 * time. Only used when the platform config does not specify its own
 * prealloc_buffer_size (see dmaengine_pcm_new()).
 */
static unsigned int prealloc_buffer_size_kbytes = 512;
module_param(prealloc_buffer_size_kbytes, uint, 0444);
MODULE_PARM_DESC(prealloc_buffer_size_kbytes, "Preallocate DMA buffer size (KB).");

/*
 * The platforms dmaengine driver does not support reporting the amount of
 * bytes that are still left to transfer.
 */
#define SND_DMAENGINE_PCM_FLAG_NO_RESIDUE BIT(31)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 27)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 28) static struct device *dmaengine_dma_dev(struct dmaengine_pcm *pcm,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 29) struct snd_pcm_substream *substream)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 30) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 31) if (!pcm->chan[substream->stream])
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 32) return NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 33)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 34) return pcm->chan[substream->stream]->device->dev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 35) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 36)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 37) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 38) * snd_dmaengine_pcm_prepare_slave_config() - Generic prepare_slave_config callback
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 39) * @substream: PCM substream
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 40) * @params: hw_params
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 41) * @slave_config: DMA slave config to prepare
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 42) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 43) * This function can be used as a generic prepare_slave_config callback for
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 44) * platforms which make use of the snd_dmaengine_dai_dma_data struct for their
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 45) * DAI DMA data. Internally the function will first call
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 46) * snd_hwparams_to_dma_slave_config to fill in the slave config based on the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 47) * hw_params, followed by snd_dmaengine_set_config_from_dai_data to fill in the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 48) * remaining fields based on the DAI DMA data.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 49) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 50) int snd_dmaengine_pcm_prepare_slave_config(struct snd_pcm_substream *substream,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 51) struct snd_pcm_hw_params *params, struct dma_slave_config *slave_config)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 52) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 53) struct snd_soc_pcm_runtime *rtd = asoc_substream_to_rtd(substream);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 54) struct snd_dmaengine_dai_dma_data *dma_data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 55) int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 56)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 57) if (rtd->num_cpus > 1) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 58) dev_err(rtd->dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 59) "%s doesn't support Multi CPU yet\n", __func__);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 60) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 61) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 62)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 63) dma_data = snd_soc_dai_get_dma_data(asoc_rtd_to_cpu(rtd, 0), substream);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 64)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 65) ret = snd_hwparams_to_dma_slave_config(substream, params, slave_config);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 66) if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 67) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 68)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 69) snd_dmaengine_pcm_set_config_from_dai_data(substream, dma_data,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 70) slave_config);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 71)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 72) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 73) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 74) EXPORT_SYMBOL_GPL(snd_dmaengine_pcm_prepare_slave_config);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 75)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 76) static int dmaengine_pcm_hw_params(struct snd_soc_component *component,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 77) struct snd_pcm_substream *substream,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 78) struct snd_pcm_hw_params *params)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 79) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 80) struct dmaengine_pcm *pcm = soc_component_to_pcm(component);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 81) struct dma_chan *chan = snd_dmaengine_pcm_get_chan(substream);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 82) int (*prepare_slave_config)(struct snd_pcm_substream *substream,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 83) struct snd_pcm_hw_params *params,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 84) struct dma_slave_config *slave_config);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 85) struct dma_slave_config slave_config;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 86) int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 87)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 88) memset(&slave_config, 0, sizeof(slave_config));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 89)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 90) if (pcm->config && pcm->config->prepare_slave_config)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 91) prepare_slave_config = pcm->config->prepare_slave_config;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 92) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 93) prepare_slave_config = snd_dmaengine_pcm_prepare_slave_config;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 94)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 95) if (prepare_slave_config) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 96) ret = prepare_slave_config(substream, params, &slave_config);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 97) if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 98) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 99)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 100) ret = dmaengine_slave_config(chan, &slave_config);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 101) if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 102) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 103) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 104)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 105) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 106) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 107)
/*
 * Derive the runtime snd_pcm_hardware for @substream: use the
 * platform-provided pcm_hardware when available, otherwise build a default
 * description and refine it against the DAI DMA data and the DMA channel's
 * capabilities.
 */
static int
dmaengine_pcm_set_runtime_hwparams(struct snd_soc_component *component,
				   struct snd_pcm_substream *substream)
{
	struct snd_soc_pcm_runtime *rtd = asoc_substream_to_rtd(substream);
	struct dmaengine_pcm *pcm = soc_component_to_pcm(component);
	struct device *dma_dev = dmaengine_dma_dev(pcm, substream);
	struct dma_chan *chan = pcm->chan[substream->stream];
	struct snd_dmaengine_dai_dma_data *dma_data;
	struct snd_pcm_hardware hw;

	if (rtd->num_cpus > 1) {
		dev_err(rtd->dev,
			"%s doesn't support Multi CPU yet\n", __func__);
		return -EINVAL;
	}

	/* A platform-supplied hardware description takes precedence. */
	if (pcm->config && pcm->config->pcm_hardware)
		return snd_soc_set_runtime_hwparams(substream,
				pcm->config->pcm_hardware);

	dma_data = snd_soc_dai_get_dma_data(asoc_rtd_to_cpu(rtd, 0), substream);

	memset(&hw, 0, sizeof(hw));
	hw.info = SNDRV_PCM_INFO_MMAP | SNDRV_PCM_INFO_MMAP_VALID |
			SNDRV_PCM_INFO_INTERLEAVED;
	hw.periods_min = 2;
	hw.periods_max = UINT_MAX;
	hw.period_bytes_min = 256;
	hw.period_bytes_max = dma_get_max_seg_size(dma_dev);
	hw.buffer_bytes_max = SIZE_MAX;
	hw.fifo_size = dma_data->fifo_size;

	/* No residue reporting: the position is only period-granular. */
	if (pcm->flags & SND_DMAENGINE_PCM_FLAG_NO_RESIDUE)
		hw.info |= SNDRV_PCM_INFO_BATCH;

	/*
	 * NOTE: plain comment, not kernel-doc — "/**" here would trigger
	 * spurious kernel-doc warnings.
	 *
	 * FIXME: Remove the return value check to align with the code
	 * before adding snd_dmaengine_pcm_refine_runtime_hwparams
	 * function.
	 */
	snd_dmaengine_pcm_refine_runtime_hwparams(substream,
						  dma_data,
						  &hw,
						  chan);

	return snd_soc_set_runtime_hwparams(substream, &hw);
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 156)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 157) static int dmaengine_pcm_open(struct snd_soc_component *component,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 158) struct snd_pcm_substream *substream)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 159) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 160) struct dmaengine_pcm *pcm = soc_component_to_pcm(component);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 161) struct dma_chan *chan = pcm->chan[substream->stream];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 162) int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 163)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 164) ret = dmaengine_pcm_set_runtime_hwparams(component, substream);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 165) if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 166) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 167)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 168) return snd_dmaengine_pcm_open(substream, chan);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 169) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 170)
/* snd_soc_component close callback: thin wrapper around the dmaengine core. */
static int dmaengine_pcm_close(struct snd_soc_component *component,
			       struct snd_pcm_substream *substream)
{
	return snd_dmaengine_pcm_close(substream);
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 176)
/* snd_soc_component trigger callback: forward start/stop/pause to dmaengine. */
static int dmaengine_pcm_trigger(struct snd_soc_component *component,
				 struct snd_pcm_substream *substream, int cmd)
{
	return snd_dmaengine_pcm_trigger(substream, cmd);
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 182)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 183) static struct dma_chan *dmaengine_pcm_compat_request_channel(
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 184) struct snd_soc_component *component,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 185) struct snd_soc_pcm_runtime *rtd,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 186) struct snd_pcm_substream *substream)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 187) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 188) struct dmaengine_pcm *pcm = soc_component_to_pcm(component);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 189) struct snd_dmaengine_dai_dma_data *dma_data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 190) dma_filter_fn fn = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 191)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 192) if (rtd->num_cpus > 1) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 193) dev_err(rtd->dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 194) "%s doesn't support Multi CPU yet\n", __func__);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 195) return NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 196) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 197)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 198) dma_data = snd_soc_dai_get_dma_data(asoc_rtd_to_cpu(rtd, 0), substream);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 199)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 200) if ((pcm->flags & SND_DMAENGINE_PCM_FLAG_HALF_DUPLEX) && pcm->chan[0])
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 201) return pcm->chan[0];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 202)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 203) if (pcm->config && pcm->config->compat_request_channel)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 204) return pcm->config->compat_request_channel(rtd, substream);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 205)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 206) if (pcm->config)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 207) fn = pcm->config->compat_filter_fn;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 208)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 209) return snd_dmaengine_pcm_request_channel(fn, dma_data->filter_data);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 210) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 211)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 212) static bool dmaengine_pcm_can_report_residue(struct device *dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 213) struct dma_chan *chan)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 214) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 215) struct dma_slave_caps dma_caps;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 216) int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 217)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 218) ret = dma_get_slave_caps(chan, &dma_caps);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 219) if (ret != 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 220) dev_warn(dev, "Failed to get DMA channel capabilities, falling back to period counting: %d\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 221) ret);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 222) return false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 223) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 224)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 225) if (dma_caps.residue_granularity == DMA_RESIDUE_GRANULARITY_DESCRIPTOR)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 226) return false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 227)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 228) return true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 229) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 230)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 231) static int dmaengine_pcm_new(struct snd_soc_component *component,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 232) struct snd_soc_pcm_runtime *rtd)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 233) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 234) struct dmaengine_pcm *pcm = soc_component_to_pcm(component);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 235) const struct snd_dmaengine_pcm_config *config = pcm->config;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 236) struct device *dev = component->dev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 237) struct snd_pcm_substream *substream;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 238) size_t prealloc_buffer_size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 239) size_t max_buffer_size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 240) unsigned int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 241)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 242) if (config && config->prealloc_buffer_size) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 243) prealloc_buffer_size = config->prealloc_buffer_size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 244) max_buffer_size = config->pcm_hardware->buffer_bytes_max;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 245) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 246) prealloc_buffer_size = prealloc_buffer_size_kbytes * 1024;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 247) max_buffer_size = SIZE_MAX;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 248) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 249)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 250) for_each_pcm_streams(i) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 251) substream = rtd->pcm->streams[i].substream;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 252) if (!substream)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 253) continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 254)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 255) if (!pcm->chan[i] && config && config->chan_names[i])
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 256) pcm->chan[i] = dma_request_slave_channel(dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 257) config->chan_names[i]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 258)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 259) if (!pcm->chan[i] && (pcm->flags & SND_DMAENGINE_PCM_FLAG_COMPAT)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 260) pcm->chan[i] = dmaengine_pcm_compat_request_channel(
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 261) component, rtd, substream);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 262) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 263)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 264) if (!pcm->chan[i]) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 265) dev_err(component->dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 266) "Missing dma channel for stream: %d\n", i);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 267) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 268) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 269)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 270) snd_pcm_set_managed_buffer(substream,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 271) SNDRV_DMA_TYPE_DEV_IRAM,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 272) dmaengine_dma_dev(pcm, substream),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 273) prealloc_buffer_size,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 274) max_buffer_size);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 275)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 276) if (!dmaengine_pcm_can_report_residue(dev, pcm->chan[i]))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 277) pcm->flags |= SND_DMAENGINE_PCM_FLAG_NO_RESIDUE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 278)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 279) if (rtd->pcm->streams[i].pcm->name[0] == '\0') {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 280) strscpy_pad(rtd->pcm->streams[i].pcm->name,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 281) rtd->pcm->streams[i].pcm->id,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 282) sizeof(rtd->pcm->streams[i].pcm->name));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 283) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 284) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 285)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 286) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 287) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 288)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 289) static snd_pcm_uframes_t dmaengine_pcm_pointer(
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 290) struct snd_soc_component *component,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 291) struct snd_pcm_substream *substream)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 292) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 293) struct dmaengine_pcm *pcm = soc_component_to_pcm(component);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 294)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 295) if (pcm->flags & SND_DMAENGINE_PCM_FLAG_NO_RESIDUE)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 296) return snd_dmaengine_pcm_pointer_no_residue(substream);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 297) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 298) return snd_dmaengine_pcm_pointer(substream);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 299) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 300)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 301) static int dmaengine_copy_user(struct snd_soc_component *component,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 302) struct snd_pcm_substream *substream,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 303) int channel, unsigned long hwoff,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 304) void __user *buf, unsigned long bytes)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 305) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 306) struct snd_pcm_runtime *runtime = substream->runtime;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 307) struct dmaengine_pcm *pcm = soc_component_to_pcm(component);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 308) int (*process)(struct snd_pcm_substream *substream,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 309) int channel, unsigned long hwoff,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 310) void *buf, unsigned long bytes) = pcm->config->process;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 311) bool is_playback = substream->stream == SNDRV_PCM_STREAM_PLAYBACK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 312) void *dma_ptr = runtime->dma_area + hwoff +
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 313) channel * (runtime->dma_bytes / runtime->channels);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 314) int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 315)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 316) if (is_playback)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 317) if (copy_from_user(dma_ptr, buf, bytes))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 318) return -EFAULT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 319)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 320) if (process) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 321) ret = process(substream, channel, hwoff, (__force void *)buf, bytes);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 322) if (ret < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 323) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 324) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 325)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 326) if (!is_playback)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 327) if (copy_to_user(buf, dma_ptr, bytes))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 328) return -EFAULT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 329)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 330) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 331) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 332)
/* Component ops used when the platform config has no process() callback. */
static const struct snd_soc_component_driver dmaengine_pcm_component = {
	.name		= SND_DMAENGINE_PCM_DRV_NAME,
	.probe_order	= SND_SOC_COMP_ORDER_LATE,
	.open		= dmaengine_pcm_open,
	.close		= dmaengine_pcm_close,
	.hw_params	= dmaengine_pcm_hw_params,
	.trigger	= dmaengine_pcm_trigger,
	.pointer	= dmaengine_pcm_pointer,
	.pcm_construct	= dmaengine_pcm_new,
};
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 343)
/*
 * Component ops used when the platform config supplies a process() callback;
 * identical to dmaengine_pcm_component plus the copy_user hook that invokes
 * the platform's sample processing.
 */
static const struct snd_soc_component_driver dmaengine_pcm_component_process = {
	.name		= SND_DMAENGINE_PCM_DRV_NAME,
	.probe_order	= SND_SOC_COMP_ORDER_LATE,
	.open		= dmaengine_pcm_open,
	.close		= dmaengine_pcm_close,
	.hw_params	= dmaengine_pcm_hw_params,
	.trigger	= dmaengine_pcm_trigger,
	.pointer	= dmaengine_pcm_pointer,
	.copy_user	= dmaengine_copy_user,
	.pcm_construct	= dmaengine_pcm_new,
};
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 355)
/* Default DT channel names per stream direction (see dma_request_chan()). */
static const char * const dmaengine_pcm_dma_channel_names[] = {
	[SNDRV_PCM_STREAM_PLAYBACK] = "tx",
	[SNDRV_PCM_STREAM_CAPTURE] = "rx",
};
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 360)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 361) static int dmaengine_pcm_request_chan_of(struct dmaengine_pcm *pcm,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 362) struct device *dev, const struct snd_dmaengine_pcm_config *config)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 363) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 364) unsigned int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 365) const char *name;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 366) struct dma_chan *chan;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 367)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 368) if ((pcm->flags & SND_DMAENGINE_PCM_FLAG_NO_DT) || (!dev->of_node &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 369) !(config && config->dma_dev && config->dma_dev->of_node)))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 370) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 371)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 372) if (config && config->dma_dev) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 373) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 374) * If this warning is seen, it probably means that your Linux
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 375) * device structure does not match your HW device structure.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 376) * It would be best to refactor the Linux device structure to
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 377) * correctly match the HW structure.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 378) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 379) dev_warn(dev, "DMA channels sourced from device %s",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 380) dev_name(config->dma_dev));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 381) dev = config->dma_dev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 382) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 383)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 384) for_each_pcm_streams(i) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 385) if (pcm->flags & SND_DMAENGINE_PCM_FLAG_HALF_DUPLEX)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 386) name = "rx-tx";
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 387) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 388) name = dmaengine_pcm_dma_channel_names[i];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 389) if (config && config->chan_names[i])
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 390) name = config->chan_names[i];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 391) chan = dma_request_chan(dev, name);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 392) if (IS_ERR(chan)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 393) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 394) * Only report probe deferral errors, channels
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 395) * might not be present for devices that
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 396) * support only TX or only RX.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 397) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 398) if (PTR_ERR(chan) == -EPROBE_DEFER)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 399) return -EPROBE_DEFER;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 400) pcm->chan[i] = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 401) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 402) pcm->chan[i] = chan;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 403) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 404) if (pcm->flags & SND_DMAENGINE_PCM_FLAG_HALF_DUPLEX)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 405) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 406) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 407)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 408) if (pcm->flags & SND_DMAENGINE_PCM_FLAG_HALF_DUPLEX)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 409) pcm->chan[1] = pcm->chan[0];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 410)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 411) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 412) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 413)
/*
 * Release every DMA channel held by @pcm. In half-duplex mode both stream
 * slots alias the same channel, so it is released at most once: the break
 * deliberately comes after a successful release, while a NULL slot falls
 * through via continue.
 */
static void dmaengine_pcm_release_chan(struct dmaengine_pcm *pcm)
{
	unsigned int i;

	for_each_pcm_streams(i) {
		if (!pcm->chan[i])
			continue;
		dma_release_channel(pcm->chan[i]);
		if (pcm->flags & SND_DMAENGINE_PCM_FLAG_HALF_DUPLEX)
			break;
	}
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 426)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 427) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 428) * snd_dmaengine_pcm_register - Register a dmaengine based PCM device
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 429) * @dev: The parent device for the PCM device
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 430) * @config: Platform specific PCM configuration
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 431) * @flags: Platform specific quirks
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 432) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 433) int snd_dmaengine_pcm_register(struct device *dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 434) const struct snd_dmaengine_pcm_config *config, unsigned int flags)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 435) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 436) const struct snd_soc_component_driver *driver;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 437) struct dmaengine_pcm *pcm;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 438) int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 439)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 440) pcm = kzalloc(sizeof(*pcm), GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 441) if (!pcm)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 442) return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 443)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 444) #ifdef CONFIG_DEBUG_FS
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 445) pcm->component.debugfs_prefix = "dma";
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 446) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 447) pcm->config = config;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 448) pcm->flags = flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 449)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 450) ret = dmaengine_pcm_request_chan_of(pcm, dev, config);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 451) if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 452) goto err_free_dma;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 453)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 454) if (config && config->process)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 455) driver = &dmaengine_pcm_component_process;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 456) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 457) driver = &dmaengine_pcm_component;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 458)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 459) ret = snd_soc_component_initialize(&pcm->component, driver, dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 460) if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 461) goto err_free_dma;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 462)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 463) ret = snd_soc_add_component(&pcm->component, NULL, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 464) if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 465) goto err_free_dma;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 466)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 467) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 468)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 469) err_free_dma:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 470) dmaengine_pcm_release_chan(pcm);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 471) kfree(pcm);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 472) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 473) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 474) EXPORT_SYMBOL_GPL(snd_dmaengine_pcm_register);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 475)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 476) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 477) * snd_dmaengine_pcm_unregister - Removes a dmaengine based PCM device
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 478) * @dev: Parent device the PCM was register with
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 479) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 480) * Removes a dmaengine based PCM device previously registered with
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 481) * snd_dmaengine_pcm_register.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 482) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 483) void snd_dmaengine_pcm_unregister(struct device *dev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 484) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 485) struct snd_soc_component *component;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 486) struct dmaengine_pcm *pcm;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 487)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 488) component = snd_soc_lookup_component(dev, SND_DMAENGINE_PCM_DRV_NAME);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 489) if (!component)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 490) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 491)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 492) pcm = soc_component_to_pcm(component);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 493)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 494) snd_soc_unregister_component_by_driver(dev, component->driver);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 495) dmaengine_pcm_release_chan(pcm);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 496) kfree(pcm);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 497) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 498) EXPORT_SYMBOL_GPL(snd_dmaengine_pcm_unregister);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 499)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 500) MODULE_LICENSE("GPL");