// SPDX-License-Identifier: GPL-2.0
/*
 * Platform driver for the Synopsys DesignWare DMA Controller
 *
 * Copyright (C) 2007-2008 Atmel Corporation
 * Copyright (C) 2010-2011 ST Microelectronics
 * Copyright (C) 2013 Intel Corporation
 */

#include <linux/of.h>
#include <linux/of_dma.h>
#include <linux/platform_device.h>

#include "internal.h"

static struct dma_chan *dw_dma_of_xlate(struct of_phandle_args *dma_spec,
					struct of_dma *ofdma)
{
	struct dw_dma *dw = ofdma->of_dma_data;
	struct dw_dma_slave slave = {
		.dma_dev = dw->dma.dev,
	};
	dma_cap_mask_t cap;

	if (dma_spec->args_count < 3 || dma_spec->args_count > 4)
		return NULL;

	slave.src_id = dma_spec->args[0];
	slave.dst_id = dma_spec->args[0];
	slave.m_master = dma_spec->args[1];
	slave.p_master = dma_spec->args[2];
	if (dma_spec->args_count >= 4)
		slave.channels = dma_spec->args[3];

	if (WARN_ON(slave.src_id >= DW_DMA_MAX_NR_REQUESTS ||
		    slave.dst_id >= DW_DMA_MAX_NR_REQUESTS ||
		    slave.m_master >= dw->pdata->nr_masters ||
		    slave.p_master >= dw->pdata->nr_masters ||
		    slave.channels >= BIT(dw->pdata->nr_channels)))
		return NULL;

	dma_cap_zero(cap);
	dma_cap_set(DMA_SLAVE, cap);

	/* TODO: there should be a simpler way to do this */
	return dma_request_channel(cap, dw_dma_filter, &slave);
}

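/*
 * Client drivers reach the translation callback above through the generic
 * dmaengine request path.  A minimal consumer sketch, assuming a
 * hypothetical client device with a "tx" entry in its "dma-names"
 * property:
 *
 *	struct dma_chan *chan;
 *
 *	chan = dma_request_chan(&client_pdev->dev, "tx");
 *	if (IS_ERR(chan))
 *		return PTR_ERR(chan);
 *
 * The OF core decodes the matching "dmas" specifier and hands its cells to
 * dw_dma_of_xlate(): cell 0 is the request line (used for both src_id and
 * dst_id), cells 1 and 2 select the memory-side and peripheral-side
 * masters, and the optional cell 3 is a mask of the channels allowed to
 * serve the request.
 */
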
struct dw_dma_platform_data *dw_dma_parse_dt(struct platform_device *pdev)
{
	struct device_node *np = pdev->dev.of_node;
	struct dw_dma_platform_data *pdata;
	u32 tmp, arr[DW_DMA_MAX_NR_MASTERS], mb[DW_DMA_MAX_NR_CHANNELS];
	u32 nr_masters;
	u32 nr_channels;

	if (!np) {
		dev_err(&pdev->dev, "Missing DT data\n");
		return NULL;
	}

	if (of_property_read_u32(np, "dma-masters", &nr_masters))
		return NULL;
	if (nr_masters < 1 || nr_masters > DW_DMA_MAX_NR_MASTERS)
		return NULL;

	if (of_property_read_u32(np, "dma-channels", &nr_channels))
		return NULL;
	if (nr_channels > DW_DMA_MAX_NR_CHANNELS)
		return NULL;

	pdata = devm_kzalloc(&pdev->dev, sizeof(*pdata), GFP_KERNEL);
	if (!pdata)
		return NULL;

	pdata->nr_masters = nr_masters;
	pdata->nr_channels = nr_channels;

	if (!of_property_read_u32(np, "chan_allocation_order", &tmp))
		pdata->chan_allocation_order = (unsigned char)tmp;

	if (!of_property_read_u32(np, "chan_priority", &tmp))
		pdata->chan_priority = tmp;

	if (!of_property_read_u32(np, "block_size", &tmp))
		pdata->block_size = tmp;

	if (!of_property_read_u32_array(np, "data-width", arr, nr_masters)) {
		for (tmp = 0; tmp < nr_masters; tmp++)
			pdata->data_width[tmp] = arr[tmp];
	} else if (!of_property_read_u32_array(np, "data_width", arr, nr_masters)) {
		/* Legacy "data_width" encodes the bus width as a power-of-two exponent */
		for (tmp = 0; tmp < nr_masters; tmp++)
			pdata->data_width[tmp] = BIT(arr[tmp] & 0x07);
	}

	if (!of_property_read_u32_array(np, "multi-block", mb, nr_channels)) {
		for (tmp = 0; tmp < nr_channels; tmp++)
			pdata->multi_block[tmp] = mb[tmp];
	} else {
		/* Assume multi-block support on every channel when the property is absent */
		for (tmp = 0; tmp < nr_channels; tmp++)
			pdata->multi_block[tmp] = 1;
	}

	if (of_property_read_u32_array(np, "snps,max-burst-len", pdata->max_burst,
				       nr_channels)) {
		/* Default to the maximum burst length for all channels */
		memset32(pdata->max_burst, DW_DMA_MAX_BURST, nr_channels);
	}

	if (!of_property_read_u32(np, "snps,dma-protection-control", &tmp)) {
		if (tmp > CHAN_PROTCTL_MASK)
			return NULL;
		pdata->protctl = tmp;
	}

	return pdata;
}

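/*
 * dw_dma_parse_dt() only builds the platform data; it does not touch the
 * hardware.  A minimal sketch of how a probe path might consume it,
 * assuming that explicitly provided platform data takes precedence over
 * device tree properties:
 *
 *	pdata = dev_get_platdata(&pdev->dev);
 *	if (!pdata)
 *		pdata = dw_dma_parse_dt(pdev);
 *	if (!pdata)
 *		return -ENODEV;
 *
 * The returned structure is devm-allocated, so it is released
 * automatically when the driver is unbound and needs no explicit kfree().
 */
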
void dw_dma_of_controller_register(struct dw_dma *dw)
{
	struct device *dev = dw->dma.dev;
	int ret;

	if (!dev->of_node)
		return;

	ret = of_dma_controller_register(dev->of_node, dw_dma_of_xlate, dw);
	if (ret)
		dev_err(dev, "could not register of_dma_controller\n");
}

void dw_dma_of_controller_free(struct dw_dma *dw)
{
	struct device *dev = dw->dma.dev;

	if (!dev->of_node)
		return;

	of_dma_controller_free(dev->of_node);
}
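
/*
 * The two helpers above are expected to bracket the dmaengine
 * registration.  A minimal probe/remove pairing sketch, assuming an
 * already initialised struct dw_dma:
 *
 *	ret = dma_async_device_register(&dw->dma);
 *	if (ret)
 *		return ret;
 *	dw_dma_of_controller_register(dw);
 *
 *	...
 *
 *	dw_dma_of_controller_free(dw);
 *	dma_async_device_unregister(&dw->dma);
 */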