^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1) // SPDX-License-Identifier: GPL-2.0-only
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3) * Device tree helpers for DMA request / controller
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5) * Based on of_gpio.c
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7) * Copyright (C) 2012 Texas Instruments Incorporated - http://www.ti.com/
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10) #include <linux/device.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11) #include <linux/err.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12) #include <linux/module.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13) #include <linux/mutex.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14) #include <linux/slab.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15) #include <linux/of.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 16) #include <linux/of_dma.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 17)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 18) #include "dmaengine.h"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 19)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 20) static LIST_HEAD(of_dma_list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 21) static DEFINE_MUTEX(of_dma_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 22)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 23) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 24) * of_dma_find_controller - Get a DMA controller in DT DMA helpers list
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 25) * @dma_spec: pointer to DMA specifier as found in the device tree
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 26) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 27) * Finds a DMA controller with matching device node and number for dma cells
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 28) * in a list of registered DMA controllers. If a match is found a valid pointer
 * to the DMA data stored is returned. A NULL pointer is returned if no match is
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 30) * found.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 31) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 32) static struct of_dma *of_dma_find_controller(struct of_phandle_args *dma_spec)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 33) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 34) struct of_dma *ofdma;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 35)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 36) list_for_each_entry(ofdma, &of_dma_list, of_dma_controllers)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 37) if (ofdma->of_node == dma_spec->np)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 38) return ofdma;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 39)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 40) pr_debug("%s: can't find DMA controller %pOF\n", __func__,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 41) dma_spec->np);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 42)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 43) return NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 44) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 45)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 46) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 47) * of_dma_router_xlate - translation function for router devices
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 48) * @dma_spec: pointer to DMA specifier as found in the device tree
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 49) * @ofdma: pointer to DMA controller data (router information)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 50) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 51) * The function creates new dma_spec to be passed to the router driver's
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 52) * of_dma_route_allocate() function to prepare a dma_spec which will be used
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 53) * to request channel from the real DMA controller.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 54) */
static struct dma_chan *of_dma_router_xlate(struct of_phandle_args *dma_spec,
					    struct of_dma *ofdma)
{
	struct dma_chan *chan;
	struct of_dma *ofdma_target;
	struct of_phandle_args dma_spec_target;
	void *route_data;

	/* translate the request for the real DMA controller */
	memcpy(&dma_spec_target, dma_spec, sizeof(dma_spec_target));
	route_data = ofdma->of_dma_route_allocate(&dma_spec_target, ofdma);
	if (IS_ERR(route_data))
		return NULL;

	/*
	 * The router has rewritten dma_spec_target to point at the real DMA
	 * controller; look that controller up in the registered list. This
	 * runs via the of_dma_xlate callback from
	 * of_dma_request_slave_channel(), so of_dma_lock is already held.
	 */
	ofdma_target = of_dma_find_controller(&dma_spec_target);
	if (!ofdma_target) {
		/* Real controller not registered (yet): undo the route */
		ofdma->dma_router->route_free(ofdma->dma_router->dev,
					      route_data);
		chan = ERR_PTR(-EPROBE_DEFER);
		goto err;
	}

	chan = ofdma_target->of_dma_xlate(&dma_spec_target, ofdma_target);
	if (IS_ERR_OR_NULL(chan)) {
		/* Channel request failed: release the allocated route */
		ofdma->dma_router->route_free(ofdma->dma_router->dev,
					      route_data);
	} else {
		/* The channel takes ownership of route_data from here on */
		chan->router = ofdma->dma_router;
		chan->route_data = route_data;
	}

err:
	/*
	 * Need to put the node back since the ofdma->of_dma_route_allocate
	 * has taken it for generating the new, translated dma_spec
	 */
	of_node_put(dma_spec_target.np);
	return chan;
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 94)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 95) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 96) * of_dma_controller_register - Register a DMA controller to DT DMA helpers
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 97) * @np: device node of DMA controller
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 98) * @of_dma_xlate: translation function which converts a phandle
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 99) * arguments list into a dma_chan structure
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 100) * @data: pointer to controller specific data to be used by
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 101) * translation function
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 102) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 103) * Returns 0 on success or appropriate errno value on error.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 104) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 105) * Allocated memory should be freed with appropriate of_dma_controller_free()
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 106) * call.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 107) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 108) int of_dma_controller_register(struct device_node *np,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 109) struct dma_chan *(*of_dma_xlate)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 110) (struct of_phandle_args *, struct of_dma *),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 111) void *data)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 112) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 113) struct of_dma *ofdma;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 114)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 115) if (!np || !of_dma_xlate) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 116) pr_err("%s: not enough information provided\n", __func__);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 117) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 118) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 119)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 120) ofdma = kzalloc(sizeof(*ofdma), GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 121) if (!ofdma)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 122) return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 123)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 124) ofdma->of_node = np;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 125) ofdma->of_dma_xlate = of_dma_xlate;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 126) ofdma->of_dma_data = data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 127)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 128) /* Now queue of_dma controller structure in list */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 129) mutex_lock(&of_dma_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 130) list_add_tail(&ofdma->of_dma_controllers, &of_dma_list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 131) mutex_unlock(&of_dma_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 132)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 133) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 134) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 135) EXPORT_SYMBOL_GPL(of_dma_controller_register);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 136)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 137) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 138) * of_dma_controller_free - Remove a DMA controller from DT DMA helpers list
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 139) * @np: device node of DMA controller
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 140) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 141) * Memory allocated by of_dma_controller_register() is freed here.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 142) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 143) void of_dma_controller_free(struct device_node *np)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 144) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 145) struct of_dma *ofdma;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 146)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 147) mutex_lock(&of_dma_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 148)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 149) list_for_each_entry(ofdma, &of_dma_list, of_dma_controllers)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 150) if (ofdma->of_node == np) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 151) list_del(&ofdma->of_dma_controllers);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 152) kfree(ofdma);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 153) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 154) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 155)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 156) mutex_unlock(&of_dma_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 157) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 158) EXPORT_SYMBOL_GPL(of_dma_controller_free);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 159)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 160) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 161) * of_dma_router_register - Register a DMA router to DT DMA helpers as a
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 162) * controller
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 163) * @np: device node of DMA router
 * @of_dma_route_allocate:	setup function for the router which needs to
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 165) * modify the dma_spec for the DMA controller to
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 166) * use and to set up the requested route.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 167) * @dma_router: pointer to dma_router structure to be used when
 *				the route needs to be freed up.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 169) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 170) * Returns 0 on success or appropriate errno value on error.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 171) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 172) * Allocated memory should be freed with appropriate of_dma_controller_free()
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 173) * call.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 174) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 175) int of_dma_router_register(struct device_node *np,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 176) void *(*of_dma_route_allocate)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 177) (struct of_phandle_args *, struct of_dma *),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 178) struct dma_router *dma_router)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 179) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 180) struct of_dma *ofdma;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 181)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 182) if (!np || !of_dma_route_allocate || !dma_router) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 183) pr_err("%s: not enough information provided\n", __func__);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 184) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 185) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 186)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 187) ofdma = kzalloc(sizeof(*ofdma), GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 188) if (!ofdma)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 189) return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 190)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 191) ofdma->of_node = np;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 192) ofdma->of_dma_xlate = of_dma_router_xlate;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 193) ofdma->of_dma_route_allocate = of_dma_route_allocate;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 194) ofdma->dma_router = dma_router;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 195)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 196) /* Now queue of_dma controller structure in list */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 197) mutex_lock(&of_dma_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 198) list_add_tail(&ofdma->of_dma_controllers, &of_dma_list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 199) mutex_unlock(&of_dma_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 200)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 201) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 202) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 203) EXPORT_SYMBOL_GPL(of_dma_router_register);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 204)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 205) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 206) * of_dma_match_channel - Check if a DMA specifier matches name
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 207) * @np: device node to look for DMA channels
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 208) * @name: channel name to be matched
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 209) * @index: index of DMA specifier in list of DMA specifiers
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 210) * @dma_spec: pointer to DMA specifier as found in the device tree
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 211) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 212) * Check if the DMA specifier pointed to by the index in a list of DMA
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 213) * specifiers, matches the name provided. Returns 0 if the name matches and
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 214) * a valid pointer to the DMA specifier is found. Otherwise returns -ENODEV.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 215) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 216) static int of_dma_match_channel(struct device_node *np, const char *name,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 217) int index, struct of_phandle_args *dma_spec)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 218) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 219) const char *s;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 220)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 221) if (of_property_read_string_index(np, "dma-names", index, &s))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 222) return -ENODEV;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 223)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 224) if (strcmp(name, s))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 225) return -ENODEV;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 226)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 227) if (of_parse_phandle_with_args(np, "dmas", "#dma-cells", index,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 228) dma_spec))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 229) return -ENODEV;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 230)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 231) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 232) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 233)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 234) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 235) * of_dma_request_slave_channel - Get the DMA slave channel
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 236) * @np: device node to get DMA request from
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 237) * @name: name of desired channel
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 238) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 239) * Returns pointer to appropriate DMA channel on success or an error pointer.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 240) */
struct dma_chan *of_dma_request_slave_channel(struct device_node *np,
					      const char *name)
{
	struct of_phandle_args dma_spec;
	struct of_dma *ofdma;
	struct dma_chan *chan;
	int count, i, start;
	int ret_no_channel = -ENODEV;
	/* Shared across all callers to spread requests over duplicate names */
	static atomic_t last_index;

	if (!np || !name) {
		pr_err("%s: not enough information provided\n", __func__);
		return ERR_PTR(-ENODEV);
	}

	/* Silently fail if there is not even the "dmas" property */
	if (!of_find_property(np, "dmas", NULL))
		return ERR_PTR(-ENODEV);

	count = of_property_count_strings(np, "dma-names");
	if (count < 0) {
		pr_err("%s: dma-names property of node '%pOF' missing or empty\n",
			__func__, np);
		return ERR_PTR(-ENODEV);
	}

	/*
	 * approximate an average distribution across multiple
	 * entries with the same name
	 */
	start = atomic_inc_return(&last_index);
	for (i = 0; i < count; i++) {
		/* Skip entries whose "dma-names" string does not match */
		if (of_dma_match_channel(np, name,
					 (i + start) % count,
					 &dma_spec))
			continue;

		/* Lock covers the controller lookup and the xlate call */
		mutex_lock(&of_dma_lock);
		ofdma = of_dma_find_controller(&dma_spec);

		if (ofdma) {
			chan = ofdma->of_dma_xlate(&dma_spec, ofdma);
		} else {
			/*
			 * Controller not registered (yet): report
			 * -EPROBE_DEFER unless a later entry succeeds.
			 */
			ret_no_channel = -EPROBE_DEFER;
			chan = NULL;
		}

		mutex_unlock(&of_dma_lock);

		/* Put the node reference taken when parsing the dma spec */
		of_node_put(dma_spec.np);

		if (chan)
			return chan;
	}

	return ERR_PTR(ret_no_channel);
}
EXPORT_SYMBOL_GPL(of_dma_request_slave_channel);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 299)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 300) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 301) * of_dma_simple_xlate - Simple DMA engine translation function
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 302) * @dma_spec: pointer to DMA specifier as found in the device tree
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 303) * @ofdma: pointer to DMA controller data
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 304) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 305) * A simple translation function for devices that use a 32-bit value for the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 306) * filter_param when calling the DMA engine dma_request_channel() function.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 307) * Note that this translation function requires that #dma-cells is equal to 1
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 308) * and the argument of the dma specifier is the 32-bit filter_param. Returns
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 309) * pointer to appropriate dma channel on success or NULL on error.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 310) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 311) struct dma_chan *of_dma_simple_xlate(struct of_phandle_args *dma_spec,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 312) struct of_dma *ofdma)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 313) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 314) int count = dma_spec->args_count;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 315) struct of_dma_filter_info *info = ofdma->of_dma_data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 316)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 317) if (!info || !info->filter_fn)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 318) return NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 319)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 320) if (count != 1)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 321) return NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 322)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 323) return __dma_request_channel(&info->dma_cap, info->filter_fn,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 324) &dma_spec->args[0], dma_spec->np);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 325) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 326) EXPORT_SYMBOL_GPL(of_dma_simple_xlate);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 327)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 328) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 329) * of_dma_xlate_by_chan_id - Translate dt property to DMA channel by channel id
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 330) * @dma_spec: pointer to DMA specifier as found in the device tree
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 331) * @ofdma: pointer to DMA controller data
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 332) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 333) * This function can be used as the of xlate callback for DMA driver which wants
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 334) * to match the channel based on the channel id. When using this xlate function
 * the #dma-cells property of the DMA controller dt node needs to be set to 1.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 336) * The data parameter of of_dma_controller_register must be a pointer to the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 337) * dma_device struct the function should match upon.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 338) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 339) * Returns pointer to appropriate dma channel on success or NULL on error.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 340) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 341) struct dma_chan *of_dma_xlate_by_chan_id(struct of_phandle_args *dma_spec,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 342) struct of_dma *ofdma)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 343) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 344) struct dma_device *dev = ofdma->of_dma_data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 345) struct dma_chan *chan, *candidate = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 346)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 347) if (!dev || dma_spec->args_count != 1)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 348) return NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 349)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 350) list_for_each_entry(chan, &dev->channels, device_node)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 351) if (chan->chan_id == dma_spec->args[0]) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 352) candidate = chan;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 353) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 354) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 355)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 356) if (!candidate)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 357) return NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 358)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 359) return dma_get_slave_channel(candidate);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 360) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 361) EXPORT_SYMBOL_GPL(of_dma_xlate_by_chan_id);