// SPDX-License-Identifier: GPL-2.0-only
/*
 *
 * Copyright (C) STMicroelectronics SA 2017
 * Author(s): M'boumba Cedric Madianga <cedric.madianga@gmail.com>
 *            Pierre-Yves Mordret <pierre-yves.mordret@st.com>
 *
 * DMA Router driver for STM32 DMA MUX
 *
 * Based on TI DMA Crossbar driver
 */
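
/*
 * Illustrative devicetree usage (values are examples only, not taken from a
 * real board file), assuming the st,stm32h7-dmamux binding: the router node
 * lists its DMA masters, and consumers request a route with three cells
 * <request-line channel-config features>, which
 * stm32_dmamux_route_allocate() below rewrites into a four-cell spec for the
 * selected master:
 *
 *	dmamux1: dma-router@40020800 {
 *		compatible = "st,stm32h7-dmamux";
 *		#dma-cells = <3>;
 *		dma-masters = <&dma1 &dma2>;
 *		dma-requests = <128>;
 *	};
 *
 *	&usart1 {
 *		dmas = <&dmamux1 41 0x400 0x0>;	/* example cells only */
 *		dma-names = "rx";
 *	};
 */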

#include <linux/clk.h>
#include <linux/delay.h>
#include <linux/err.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/of_device.h>
#include <linux/of_dma.h>
#include <linux/pm_runtime.h>
#include <linux/reset.h>
#include <linux/slab.h>
#include <linux/spinlock.h>

#define STM32_DMAMUX_CCR(x)		(0x4 * (x))
#define STM32_DMAMUX_MAX_DMA_REQUESTS	32
#define STM32_DMAMUX_MAX_REQUESTS	255
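
/*
 * Each DMAMUX output channel x has a 32-bit channel configuration register
 * at offset 4 * x from the controller base. This driver only ever programs
 * the request-ID field of it: it writes the routed request line number to
 * set up a route and 0 to tear it down. Any further field layout is left to
 * the reference manual and is not modelled here.
 */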

struct stm32_dmamux {
	u32 master;
	u32 request;
	u32 chan_id;
};
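
/*
 * One struct stm32_dmamux is allocated per routed channel by
 * stm32_dmamux_route_allocate() and handed back to the DMA core as
 * route_data; stm32_dmamux_free() tears the route down and frees it again.
 */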

struct stm32_dmamux_data {
	struct dma_router dmarouter;
	struct clk *clk;
	void __iomem *iomem;
	u32 dma_requests; /* Number of DMA requests connected to DMAMUX */
	u32 dmamux_requests; /* Number of DMA requests routed toward DMAs */
	spinlock_t lock; /* Protects register access */
	unsigned long *dma_inuse; /* Bitmap of used DMA channels */
	u32 ccr[STM32_DMAMUX_MAX_DMA_REQUESTS]; /* Used to back up the CCR
						 * registers during suspend
						 */
	u32 dma_reqs[]; /* Number of DMA requests per DMA master.
			 * [0] holds the number of DMA masters.
			 * To be kept at the very end of this structure.
			 */
};
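
/*
 * Worked example (values assumed, not from a real SoC): with two masters of
 * eight requests each, dma_reqs[] = { 2, 8, 8 }, dma_requests = 16, and mux
 * channel ids 0..7 belong to master 0 while 8..15 belong to master 1.
 */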

static inline u32 stm32_dmamux_read(void __iomem *iomem, u32 reg)
{
	return readl_relaxed(iomem + reg);
}

static inline void stm32_dmamux_write(void __iomem *iomem, u32 reg, u32 val)
{
	writel_relaxed(val, iomem + reg);
}

static void stm32_dmamux_free(struct device *dev, void *route_data)
{
	struct stm32_dmamux_data *dmamux = dev_get_drvdata(dev);
	struct stm32_dmamux *mux = route_data;
	unsigned long flags;

	/* Clear dma request */
	spin_lock_irqsave(&dmamux->lock, flags);

	stm32_dmamux_write(dmamux->iomem, STM32_DMAMUX_CCR(mux->chan_id), 0);
	clear_bit(mux->chan_id, dmamux->dma_inuse);

	pm_runtime_put_sync(dev);

	spin_unlock_irqrestore(&dmamux->lock, flags);

	dev_dbg(dev, "Unmapping DMAMUX(%u) to DMA%u(%u)\n",
		mux->request, mux->master, mux->chan_id);

	kfree(mux);
}

static void *stm32_dmamux_route_allocate(struct of_phandle_args *dma_spec,
					 struct of_dma *ofdma)
{
	struct platform_device *pdev = of_find_device_by_node(ofdma->of_node);
	struct stm32_dmamux_data *dmamux = platform_get_drvdata(pdev);
	struct stm32_dmamux *mux;
	u32 i, min, max;
	int ret;
	unsigned long flags;

	if (dma_spec->args_count != 3) {
		dev_err(&pdev->dev, "invalid number of dma mux args\n");
		return ERR_PTR(-EINVAL);
	}

	if (dma_spec->args[0] > dmamux->dmamux_requests) {
		dev_err(&pdev->dev, "invalid mux request number: %d\n",
			dma_spec->args[0]);
		return ERR_PTR(-EINVAL);
	}

	mux = kzalloc(sizeof(*mux), GFP_KERNEL);
	if (!mux)
		return ERR_PTR(-ENOMEM);

	spin_lock_irqsave(&dmamux->lock, flags);
	mux->chan_id = find_first_zero_bit(dmamux->dma_inuse,
					   dmamux->dma_requests);

	if (mux->chan_id == dmamux->dma_requests) {
		spin_unlock_irqrestore(&dmamux->lock, flags);
		dev_err(&pdev->dev, "Run out of free DMA requests\n");
		ret = -ENOMEM;
		goto error_chan_id;
	}
	set_bit(mux->chan_id, dmamux->dma_inuse);
	spin_unlock_irqrestore(&dmamux->lock, flags);

	/* Look for DMA Master */
	for (i = 1, min = 0, max = dmamux->dma_reqs[i];
	     i <= dmamux->dma_reqs[0];
	     min += dmamux->dma_reqs[i], max += dmamux->dma_reqs[++i])
		if (mux->chan_id < max)
			break;
	mux->master = i - 1;
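	/*
	 * Worked example (assumed values): with dma_reqs[] = { 2, 8, 8 } and
	 * chan_id = 10, the loop exits at i = 2 with min = 8 and max = 16,
	 * i.e. master 1, local channel 10 - 8 = 2.
	 */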

	/* The of_node_put() will be done in of_dma_router_xlate function */
	dma_spec->np = of_parse_phandle(ofdma->of_node, "dma-masters", i - 1);
	if (!dma_spec->np) {
		dev_err(&pdev->dev, "can't get dma master\n");
		ret = -EINVAL;
		goto error;
	}

	/* Set dma request */
	spin_lock_irqsave(&dmamux->lock, flags);
	ret = pm_runtime_resume_and_get(&pdev->dev);
	if (ret < 0) {
		spin_unlock_irqrestore(&dmamux->lock, flags);
		goto error;
	}
	spin_unlock_irqrestore(&dmamux->lock, flags);

	mux->request = dma_spec->args[0];

	/* craft DMA spec */
	dma_spec->args[3] = dma_spec->args[2];
	dma_spec->args[2] = dma_spec->args[1];
	dma_spec->args[1] = 0;
	dma_spec->args[0] = mux->chan_id - min;
	dma_spec->args_count = 4;
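	/*
	 * Assuming the stm32-dma binding's cell order, the three consumer
	 * cells <request-line channel-config features> have just been
	 * rewritten into the four cells the master expects: the mux-local
	 * channel id first, then a request line of 0 (the DMAMUX, not the
	 * DMA controller, now selects the request), followed by the original
	 * channel-config and features cells.
	 */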

	stm32_dmamux_write(dmamux->iomem, STM32_DMAMUX_CCR(mux->chan_id),
			   mux->request);
	dev_dbg(&pdev->dev, "Mapping DMAMUX(%u) to DMA%u(%u)\n",
		mux->request, mux->master, mux->chan_id);

	return mux;

error:
	clear_bit(mux->chan_id, dmamux->dma_inuse);

error_chan_id:
	kfree(mux);
	return ERR_PTR(ret);
}

static const struct of_device_id stm32_stm32dma_master_match[] = {
	{ .compatible = "st,stm32-dma", },
	{},
};

static int stm32_dmamux_probe(struct platform_device *pdev)
{
	struct device_node *node = pdev->dev.of_node;
	const struct of_device_id *match;
	struct device_node *dma_node;
	struct stm32_dmamux_data *stm32_dmamux;
	struct resource *res;
	void __iomem *iomem;
	struct reset_control *rst;
	int i, count, ret;
	u32 dma_req;

	if (!node)
		return -ENODEV;

	count = device_property_count_u32(&pdev->dev, "dma-masters");
	if (count < 0) {
		dev_err(&pdev->dev, "Can't get DMA master(s) node\n");
		return -ENODEV;
	}

	stm32_dmamux = devm_kzalloc(&pdev->dev, sizeof(*stm32_dmamux) +
				    sizeof(u32) * (count + 1), GFP_KERNEL);
	if (!stm32_dmamux)
		return -ENOMEM;

	dma_req = 0;
	for (i = 1; i <= count; i++) {
		dma_node = of_parse_phandle(node, "dma-masters", i - 1);

		match = of_match_node(stm32_stm32dma_master_match, dma_node);
		if (!match) {
			dev_err(&pdev->dev, "DMA master is not supported\n");
			of_node_put(dma_node);
			return -EINVAL;
		}

		if (of_property_read_u32(dma_node, "dma-requests",
					 &stm32_dmamux->dma_reqs[i])) {
			dev_info(&pdev->dev,
				 "Missing MUX output information, using %u.\n",
				 STM32_DMAMUX_MAX_DMA_REQUESTS);
			stm32_dmamux->dma_reqs[i] =
				STM32_DMAMUX_MAX_DMA_REQUESTS;
		}
		dma_req += stm32_dmamux->dma_reqs[i];
		of_node_put(dma_node);
	}

	if (dma_req > STM32_DMAMUX_MAX_DMA_REQUESTS) {
		dev_err(&pdev->dev, "Too many DMA Master Requests to manage\n");
		return -ENODEV;
	}

	stm32_dmamux->dma_requests = dma_req;
	stm32_dmamux->dma_reqs[0] = count;
	stm32_dmamux->dma_inuse = devm_kcalloc(&pdev->dev,
					       BITS_TO_LONGS(dma_req),
					       sizeof(unsigned long),
					       GFP_KERNEL);
	if (!stm32_dmamux->dma_inuse)
		return -ENOMEM;

	if (device_property_read_u32(&pdev->dev, "dma-requests",
				     &stm32_dmamux->dmamux_requests)) {
		stm32_dmamux->dmamux_requests = STM32_DMAMUX_MAX_REQUESTS;
		dev_warn(&pdev->dev, "DMAMUX defaulting on %u requests\n",
			 stm32_dmamux->dmamux_requests);
	}
	pm_runtime_get_noresume(&pdev->dev);

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	iomem = devm_ioremap_resource(&pdev->dev, res);
	if (IS_ERR(iomem))
		return PTR_ERR(iomem);

	spin_lock_init(&stm32_dmamux->lock);

	stm32_dmamux->clk = devm_clk_get(&pdev->dev, NULL);
	if (IS_ERR(stm32_dmamux->clk))
		return dev_err_probe(&pdev->dev, PTR_ERR(stm32_dmamux->clk),
				     "Missing clock controller\n");

	ret = clk_prepare_enable(stm32_dmamux->clk);
	if (ret < 0) {
		dev_err(&pdev->dev, "clk_prep_enable error: %d\n", ret);
		return ret;
	}

	rst = devm_reset_control_get(&pdev->dev, NULL);
	if (IS_ERR(rst)) {
		ret = PTR_ERR(rst);
		if (ret == -EPROBE_DEFER)
			goto err_clk;
	} else {
		reset_control_assert(rst);
		udelay(2);
		reset_control_deassert(rst);
	}

	stm32_dmamux->iomem = iomem;
	stm32_dmamux->dmarouter.dev = &pdev->dev;
	stm32_dmamux->dmarouter.route_free = stm32_dmamux_free;

	platform_set_drvdata(pdev, stm32_dmamux);
	pm_runtime_set_active(&pdev->dev);
	pm_runtime_enable(&pdev->dev);

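	/*
	 * The usage reference taken here (without triggering a resume: the
	 * device was just marked active above) keeps the mux powered while
	 * every route register is cleared; the pm_runtime_put() after the
	 * loop then lets it runtime-suspend until a route is requested.
	 */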
	pm_runtime_get_noresume(&pdev->dev);

	/* Reset the dmamux */
	for (i = 0; i < stm32_dmamux->dma_requests; i++)
		stm32_dmamux_write(stm32_dmamux->iomem, STM32_DMAMUX_CCR(i), 0);

	pm_runtime_put(&pdev->dev);

	ret = of_dma_router_register(node, stm32_dmamux_route_allocate,
				     &stm32_dmamux->dmarouter);
	if (ret)
		goto pm_disable;

	return 0;

pm_disable:
	pm_runtime_disable(&pdev->dev);
err_clk:
	clk_disable_unprepare(stm32_dmamux->clk);

	return ret;
}

#ifdef CONFIG_PM
static int stm32_dmamux_runtime_suspend(struct device *dev)
{
	struct platform_device *pdev = to_platform_device(dev);
	struct stm32_dmamux_data *stm32_dmamux = platform_get_drvdata(pdev);

	clk_disable_unprepare(stm32_dmamux->clk);

	return 0;
}

static int stm32_dmamux_runtime_resume(struct device *dev)
{
	struct platform_device *pdev = to_platform_device(dev);
	struct stm32_dmamux_data *stm32_dmamux = platform_get_drvdata(pdev);
	int ret;

	ret = clk_prepare_enable(stm32_dmamux->clk);
	if (ret) {
		dev_err(&pdev->dev, "failed to prepare_enable clock\n");
		return ret;
	}

	return 0;
}
#endif

#ifdef CONFIG_PM_SLEEP
static int stm32_dmamux_suspend(struct device *dev)
{
	struct platform_device *pdev = to_platform_device(dev);
	struct stm32_dmamux_data *stm32_dmamux = platform_get_drvdata(pdev);
	int i, ret;

	ret = pm_runtime_resume_and_get(dev);
	if (ret < 0)
		return ret;

	for (i = 0; i < stm32_dmamux->dma_requests; i++)
		stm32_dmamux->ccr[i] = stm32_dmamux_read(stm32_dmamux->iomem,
							 STM32_DMAMUX_CCR(i));

	pm_runtime_put_sync(dev);

	pm_runtime_force_suspend(dev);

	return 0;
}

static int stm32_dmamux_resume(struct device *dev)
{
	struct platform_device *pdev = to_platform_device(dev);
	struct stm32_dmamux_data *stm32_dmamux = platform_get_drvdata(pdev);
	int i, ret;

	ret = pm_runtime_force_resume(dev);
	if (ret < 0)
		return ret;

	ret = pm_runtime_resume_and_get(dev);
	if (ret < 0)
		return ret;

	for (i = 0; i < stm32_dmamux->dma_requests; i++)
		stm32_dmamux_write(stm32_dmamux->iomem, STM32_DMAMUX_CCR(i),
				   stm32_dmamux->ccr[i]);

	pm_runtime_put_sync(dev);

	return 0;
}
#endif

static const struct dev_pm_ops stm32_dmamux_pm_ops = {
	SET_SYSTEM_SLEEP_PM_OPS(stm32_dmamux_suspend, stm32_dmamux_resume)
	SET_RUNTIME_PM_OPS(stm32_dmamux_runtime_suspend,
			   stm32_dmamux_runtime_resume, NULL)
};

static const struct of_device_id stm32_dmamux_match[] = {
	{ .compatible = "st,stm32h7-dmamux" },
	{},
};

static struct platform_driver stm32_dmamux_driver = {
	.probe	= stm32_dmamux_probe,
	.driver = {
		.name = "stm32-dmamux",
		.of_match_table = stm32_dmamux_match,
		.pm = &stm32_dmamux_pm_ops,
	},
};

static int __init stm32_dmamux_init(void)
{
	return platform_driver_register(&stm32_dmamux_driver);
}
arch_initcall(stm32_dmamux_init);

MODULE_DESCRIPTION("DMA Router driver for STM32 DMA MUX");
MODULE_AUTHOR("M'boumba Cedric Madianga <cedric.madianga@gmail.com>");
MODULE_AUTHOR("Pierre-Yves Mordret <pierre-yves.mordret@st.com>");
MODULE_LICENSE("GPL v2");