// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Copyright(c) 2004 - 2006 Intel Corporation. All rights reserved.
 */

/*
 * This code implements the DMA subsystem. It provides a HW-neutral interface
 * for other kernel code to use asynchronous memory copy capabilities,
 * if present, and allows different HW DMA drivers to register as providing
 * this capability.
 *
 * Because we are accelerating what is already a relatively fast operation,
 * the code goes to great lengths to avoid additional overhead, such as
 * locking.
 *
 * LOCKING:
 *
 * The subsystem keeps a global list of dma_device structs; it is protected by
 * a mutex, dma_list_mutex.
 *
 * A subsystem can get access to a channel by calling dmaengine_get() followed
 * by dma_find_channel(), or if it has need for an exclusive channel it can call
 * dma_request_channel(). Once a channel is allocated a reference is taken
 * against its corresponding driver to disable removal.
 *
 * Each device has a channels list, which runs unlocked but is never modified
 * once the device is registered; it is just set up by the driver.
 *
 * See Documentation/driver-api/dmaengine for more details.
 */
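
/*
 * Illustrative sketch (not part of this file): a client that only needs
 * best-effort memcpy offload takes a subsystem reference and then looks up
 * a channel on its hot path. Error handling and the actual descriptor
 * prep/submit calls are elided.
 *
 *	dmaengine_get();
 *	chan = dma_find_channel(DMA_MEMCPY);
 *	if (chan) {
 *		// prep, submit and issue descriptors on chan
 *	}
 *	dmaengine_put();
 */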

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/platform_device.h>
#include <linux/dma-mapping.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/mm.h>
#include <linux/device.h>
#include <linux/dmaengine.h>
#include <linux/hardirq.h>
#include <linux/spinlock.h>
#include <linux/percpu.h>
#include <linux/rcupdate.h>
#include <linux/mutex.h>
#include <linux/jiffies.h>
#include <linux/rculist.h>
#include <linux/idr.h>
#include <linux/slab.h>
#include <linux/acpi.h>
#include <linux/acpi_dma.h>
#include <linux/of_dma.h>
#include <linux/mempool.h>
#include <linux/numa.h>

#include "dmaengine.h"

static DEFINE_MUTEX(dma_list_mutex);
static DEFINE_IDA(dma_ida);
static LIST_HEAD(dma_device_list);
static long dmaengine_ref_count;

/* --- debugfs implementation --- */
#ifdef CONFIG_DEBUG_FS
#include <linux/debugfs.h>

static struct dentry *rootdir;

static void dmaengine_debug_register(struct dma_device *dma_dev)
{
	dma_dev->dbg_dev_root = debugfs_create_dir(dev_name(dma_dev->dev),
						   rootdir);
	if (IS_ERR(dma_dev->dbg_dev_root))
		dma_dev->dbg_dev_root = NULL;
}

static void dmaengine_debug_unregister(struct dma_device *dma_dev)
{
	debugfs_remove_recursive(dma_dev->dbg_dev_root);
	dma_dev->dbg_dev_root = NULL;
}

static void dmaengine_dbg_summary_show(struct seq_file *s,
				       struct dma_device *dma_dev)
{
	struct dma_chan *chan;

	list_for_each_entry(chan, &dma_dev->channels, device_node) {
		if (chan->client_count) {
			seq_printf(s, " %-13s| %s", dma_chan_name(chan),
				   chan->dbg_client_name ?: "in-use");

			if (chan->router)
				seq_printf(s, " (via router: %s)\n",
					   dev_name(chan->router->dev));
			else
				seq_puts(s, "\n");
		}
	}
}

static int dmaengine_summary_show(struct seq_file *s, void *data)
{
	struct dma_device *dma_dev = NULL;

	mutex_lock(&dma_list_mutex);
	list_for_each_entry(dma_dev, &dma_device_list, global_node) {
		seq_printf(s, "dma%d (%s): number of channels: %u\n",
			   dma_dev->dev_id, dev_name(dma_dev->dev),
			   dma_dev->chancnt);

		if (dma_dev->dbg_summary_show)
			dma_dev->dbg_summary_show(s, dma_dev);
		else
			dmaengine_dbg_summary_show(s, dma_dev);

		if (!list_is_last(&dma_dev->global_node, &dma_device_list))
			seq_puts(s, "\n");
	}
	mutex_unlock(&dma_list_mutex);

	return 0;
}
DEFINE_SHOW_ATTRIBUTE(dmaengine_summary);

static void __init dmaengine_debugfs_init(void)
{
	rootdir = debugfs_create_dir("dmaengine", NULL);

	/* /sys/kernel/debug/dmaengine/summary */
	debugfs_create_file("summary", 0444, rootdir, NULL,
			    &dmaengine_summary_fops);
}
#else
static inline void dmaengine_debugfs_init(void) { }
static inline int dmaengine_debug_register(struct dma_device *dma_dev)
{
	return 0;
}

static inline void dmaengine_debug_unregister(struct dma_device *dma_dev) { }
#endif /* DEBUG_FS */
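
/*
 * For reference, the "summary" file created above prints one block per
 * registered device, driven by the format strings in
 * dmaengine_summary_show() and dmaengine_dbg_summary_show(). With a
 * hypothetical controller and clients it might look like:
 *
 *	dma0 (1b300000.dma-controller): number of channels: 8
 *	 dma0chan0    | ff000000.serial:tx
 *	 dma0chan1    | in-use (via router: dma-router@40)
 *
 * (the device, client, and router names above are made up for illustration).
 */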

/* --- sysfs implementation --- */

#define DMA_SLAVE_NAME	"slave"

/**
 * dev_to_dma_chan - convert a device pointer to its sysfs container object
 * @dev: device node
 *
 * Must be called under dma_list_mutex.
 */
static struct dma_chan *dev_to_dma_chan(struct device *dev)
{
	struct dma_chan_dev *chan_dev;

	chan_dev = container_of(dev, typeof(*chan_dev), device);
	return chan_dev->chan;
}

static ssize_t memcpy_count_show(struct device *dev,
				 struct device_attribute *attr, char *buf)
{
	struct dma_chan *chan;
	unsigned long count = 0;
	int i;
	int err;

	mutex_lock(&dma_list_mutex);
	chan = dev_to_dma_chan(dev);
	if (chan) {
		for_each_possible_cpu(i)
			count += per_cpu_ptr(chan->local, i)->memcpy_count;
		err = sprintf(buf, "%lu\n", count);
	} else
		err = -ENODEV;
	mutex_unlock(&dma_list_mutex);

	return err;
}
static DEVICE_ATTR_RO(memcpy_count);

static ssize_t bytes_transferred_show(struct device *dev,
				      struct device_attribute *attr, char *buf)
{
	struct dma_chan *chan;
	unsigned long count = 0;
	int i;
	int err;

	mutex_lock(&dma_list_mutex);
	chan = dev_to_dma_chan(dev);
	if (chan) {
		for_each_possible_cpu(i)
			count += per_cpu_ptr(chan->local, i)->bytes_transferred;
		err = sprintf(buf, "%lu\n", count);
	} else
		err = -ENODEV;
	mutex_unlock(&dma_list_mutex);

	return err;
}
static DEVICE_ATTR_RO(bytes_transferred);

static ssize_t in_use_show(struct device *dev, struct device_attribute *attr,
			   char *buf)
{
	struct dma_chan *chan;
	int err;

	mutex_lock(&dma_list_mutex);
	chan = dev_to_dma_chan(dev);
	if (chan)
		err = sprintf(buf, "%d\n", chan->client_count);
	else
		err = -ENODEV;
	mutex_unlock(&dma_list_mutex);

	return err;
}
static DEVICE_ATTR_RO(in_use);

static struct attribute *dma_dev_attrs[] = {
	&dev_attr_memcpy_count.attr,
	&dev_attr_bytes_transferred.attr,
	&dev_attr_in_use.attr,
	NULL,
};
ATTRIBUTE_GROUPS(dma_dev);

static void chan_dev_release(struct device *dev)
{
	struct dma_chan_dev *chan_dev;

	chan_dev = container_of(dev, typeof(*chan_dev), device);
	kfree(chan_dev);
}

static struct class dma_devclass = {
	.name		= "dma",
	.dev_groups	= dma_dev_groups,
	.dev_release	= chan_dev_release,
};
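
/*
 * The attributes above surface per-channel statistics through the "dma"
 * class. Assuming a first controller with at least one channel registered,
 * they would typically appear as:
 *
 *	/sys/class/dma/dma0chan0/memcpy_count
 *	/sys/class/dma/dma0chan0/bytes_transferred
 *	/sys/class/dma/dma0chan0/in_use
 *
 * e.g. "cat /sys/class/dma/dma0chan0/in_use" prints the current client count.
 */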

/* --- client and device registration --- */

/* enable iteration over all operation types */
static dma_cap_mask_t dma_cap_mask_all;

/**
 * struct dma_chan_tbl_ent - tracks channel allocations per core/operation
 * @chan: associated channel for this entry
 */
struct dma_chan_tbl_ent {
	struct dma_chan *chan;
};

/* percpu lookup table for memory-to-memory offload providers */
static struct dma_chan_tbl_ent __percpu *channel_table[DMA_TX_TYPE_END];

static int __init dma_channel_table_init(void)
{
	enum dma_transaction_type cap;
	int err = 0;

	bitmap_fill(dma_cap_mask_all.bits, DMA_TX_TYPE_END);

	/* 'interrupt', 'private', and 'slave' are channel capabilities,
	 * but are not associated with an operation so they do not need
	 * an entry in the channel_table
	 */
	clear_bit(DMA_INTERRUPT, dma_cap_mask_all.bits);
	clear_bit(DMA_PRIVATE, dma_cap_mask_all.bits);
	clear_bit(DMA_SLAVE, dma_cap_mask_all.bits);

	for_each_dma_cap_mask(cap, dma_cap_mask_all) {
		channel_table[cap] = alloc_percpu(struct dma_chan_tbl_ent);
		if (!channel_table[cap]) {
			err = -ENOMEM;
			break;
		}
	}

	if (err) {
		pr_err("dmaengine dma_channel_table_init failure: %d\n", err);
		for_each_dma_cap_mask(cap, dma_cap_mask_all)
			free_percpu(channel_table[cap]);
	}

	return err;
}
arch_initcall(dma_channel_table_init);

/**
 * dma_chan_is_local - checks if the channel is in the same NUMA-node as the CPU
 * @chan: DMA channel to test
 * @cpu: CPU index which the channel should be close to
 *
 * Returns true if the channel is in the same NUMA-node as the CPU.
 */
static bool dma_chan_is_local(struct dma_chan *chan, int cpu)
{
	int node = dev_to_node(chan->device->dev);

	return node == NUMA_NO_NODE ||
		cpumask_test_cpu(cpu, cpumask_of_node(node));
}

/**
 * min_chan - finds the channel with min count and in the same NUMA-node as the CPU
 * @cap: capability to match
 * @cpu: CPU index which the channel should be close to
 *
 * If some channels are close to the given CPU, the one with the lowest
 * reference count is returned. Otherwise, CPU is ignored and only the
 * reference count is taken into account.
 *
 * Must be called under dma_list_mutex.
 */
static struct dma_chan *min_chan(enum dma_transaction_type cap, int cpu)
{
	struct dma_device *device;
	struct dma_chan *chan;
	struct dma_chan *min = NULL;
	struct dma_chan *localmin = NULL;

	list_for_each_entry(device, &dma_device_list, global_node) {
		if (!dma_has_cap(cap, device->cap_mask) ||
		    dma_has_cap(DMA_PRIVATE, device->cap_mask))
			continue;
		list_for_each_entry(chan, &device->channels, device_node) {
			if (!chan->client_count)
				continue;
			if (!min || chan->table_count < min->table_count)
				min = chan;

			if (dma_chan_is_local(chan, cpu))
				if (!localmin ||
				    chan->table_count < localmin->table_count)
					localmin = chan;
		}
	}

	chan = localmin ? localmin : min;

	if (chan)
		chan->table_count++;

	return chan;
}

/**
 * dma_channel_rebalance - redistribute the available channels
 *
 * Optimize for CPU isolation (each CPU gets a dedicated channel for an
 * operation type) in the SMP case, and operation isolation (avoid
 * multi-tasking channels) in the non-SMP case.
 *
 * Must be called under dma_list_mutex.
 */
static void dma_channel_rebalance(void)
{
	struct dma_chan *chan;
	struct dma_device *device;
	int cpu;
	int cap;

	/* undo the last distribution */
	for_each_dma_cap_mask(cap, dma_cap_mask_all)
		for_each_possible_cpu(cpu)
			per_cpu_ptr(channel_table[cap], cpu)->chan = NULL;

	list_for_each_entry(device, &dma_device_list, global_node) {
		if (dma_has_cap(DMA_PRIVATE, device->cap_mask))
			continue;
		list_for_each_entry(chan, &device->channels, device_node)
			chan->table_count = 0;
	}

	/* don't populate the channel_table if no clients are available */
	if (!dmaengine_ref_count)
		return;

	/* redistribute available channels */
	for_each_dma_cap_mask(cap, dma_cap_mask_all)
		for_each_online_cpu(cpu) {
			chan = min_chan(cap, cpu);
			per_cpu_ptr(channel_table[cap], cpu)->chan = chan;
		}
}

static int dma_device_satisfies_mask(struct dma_device *device,
				     const dma_cap_mask_t *want)
{
	dma_cap_mask_t has;

	bitmap_and(has.bits, want->bits, device->cap_mask.bits,
		   DMA_TX_TYPE_END);
	return bitmap_equal(want->bits, has.bits, DMA_TX_TYPE_END);
}
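
/*
 * In other words, a device satisfies the mask only if every capability the
 * caller asked for is present. A rough sketch of how a caller's mask is
 * built (the device in the comment below is hypothetical):
 *
 *	dma_cap_mask_t want;
 *
 *	dma_cap_zero(want);
 *	dma_cap_set(DMA_MEMCPY, want);
 *	dma_cap_set(DMA_XOR, want);
 *	// dma_device_satisfies_mask(dev, &want) is false for a memcpy-only dev
 */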

static struct module *dma_chan_to_owner(struct dma_chan *chan)
{
	return chan->device->owner;
}

/**
 * balance_ref_count - catch up the channel reference count
 * @chan: channel to balance ->client_count versus dmaengine_ref_count
 *
 * Must be called under dma_list_mutex.
 */
static void balance_ref_count(struct dma_chan *chan)
{
	struct module *owner = dma_chan_to_owner(chan);

	while (chan->client_count < dmaengine_ref_count) {
		__module_get(owner);
		chan->client_count++;
	}
}

static void dma_device_release(struct kref *ref)
{
	struct dma_device *device = container_of(ref, struct dma_device, ref);

	list_del_rcu(&device->global_node);
	dma_channel_rebalance();

	if (device->device_release)
		device->device_release(device);
}

static void dma_device_put(struct dma_device *device)
{
	lockdep_assert_held(&dma_list_mutex);
	kref_put(&device->ref, dma_device_release);
}

/**
 * dma_chan_get - try to grab a DMA channel's parent driver module
 * @chan: channel to grab
 *
 * Must be called under dma_list_mutex.
 */
static int dma_chan_get(struct dma_chan *chan)
{
	struct module *owner = dma_chan_to_owner(chan);
	int ret;

	/* The channel is already in use, update client count */
	if (chan->client_count) {
		__module_get(owner);
		goto out;
	}

	if (!try_module_get(owner))
		return -ENODEV;

	ret = kref_get_unless_zero(&chan->device->ref);
	if (!ret) {
		ret = -ENODEV;
		goto module_put_out;
	}

	/* allocate upon first client reference */
	if (chan->device->device_alloc_chan_resources) {
		ret = chan->device->device_alloc_chan_resources(chan);
		if (ret < 0)
			goto err_out;
	}

	if (!dma_has_cap(DMA_PRIVATE, chan->device->cap_mask))
		balance_ref_count(chan);

out:
	chan->client_count++;
	return 0;

err_out:
	dma_device_put(chan->device);
module_put_out:
	module_put(owner);
	return ret;
}

/**
 * dma_chan_put - drop a reference to a DMA channel's parent driver module
 * @chan: channel to release
 *
 * Must be called under dma_list_mutex.
 */
static void dma_chan_put(struct dma_chan *chan)
{
	/* This channel is not in use, bail out */
	if (!chan->client_count)
		return;

	chan->client_count--;

	/* This channel is not in use anymore, free it */
	if (!chan->client_count && chan->device->device_free_chan_resources) {
		/* Make sure all operations have completed */
		dmaengine_synchronize(chan);
		chan->device->device_free_chan_resources(chan);
	}

	/* If the channel is used via a DMA request router, free the mapping */
	if (chan->router && chan->router->route_free) {
		chan->router->route_free(chan->router->dev, chan->route_data);
		chan->router = NULL;
		chan->route_data = NULL;
	}

	dma_device_put(chan->device);
	module_put(dma_chan_to_owner(chan));
}

enum dma_status dma_sync_wait(struct dma_chan *chan, dma_cookie_t cookie)
{
	enum dma_status status;
	unsigned long dma_sync_wait_timeout = jiffies + msecs_to_jiffies(5000);

	dma_async_issue_pending(chan);
	do {
		status = dma_async_is_tx_complete(chan, cookie, NULL, NULL);
		if (time_after_eq(jiffies, dma_sync_wait_timeout)) {
			dev_err(chan->device->dev, "%s: timeout!\n", __func__);
			return DMA_ERROR;
		}
		if (status != DMA_IN_PROGRESS)
			break;
		cpu_relax();
	} while (1);

	return status;
}
EXPORT_SYMBOL(dma_sync_wait);
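
/*
 * Typical (illustrative) use of dma_sync_wait(): a caller that has already
 * prepared a descriptor submits it and then spins until completion or
 * timeout. The tx/chan variables below are placeholders for the caller's
 * own state.
 *
 *	cookie = dmaengine_submit(tx);
 *	if (dma_submit_error(cookie))
 *		return -EIO;
 *	if (dma_sync_wait(chan, cookie) != DMA_COMPLETE)
 *		return -EIO;
 */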

/**
 * dma_find_channel - find a channel to carry out the operation
 * @tx_type: transaction type
 */
struct dma_chan *dma_find_channel(enum dma_transaction_type tx_type)
{
	return this_cpu_read(channel_table[tx_type]->chan);
}
EXPORT_SYMBOL(dma_find_channel);

/**
 * dma_issue_pending_all - flush all pending operations across all channels
 */
void dma_issue_pending_all(void)
{
	struct dma_device *device;
	struct dma_chan *chan;

	rcu_read_lock();
	list_for_each_entry_rcu(device, &dma_device_list, global_node) {
		if (dma_has_cap(DMA_PRIVATE, device->cap_mask))
			continue;
		list_for_each_entry(chan, &device->channels, device_node)
			if (chan->client_count)
				device->device_issue_pending(chan);
	}
	rcu_read_unlock();
}
EXPORT_SYMBOL(dma_issue_pending_all);

int dma_get_slave_caps(struct dma_chan *chan, struct dma_slave_caps *caps)
{
	struct dma_device *device;

	if (!chan || !caps)
		return -EINVAL;

	device = chan->device;

	/* check if the channel supports slave transactions */
	if (!(test_bit(DMA_SLAVE, device->cap_mask.bits) ||
	      test_bit(DMA_CYCLIC, device->cap_mask.bits)))
		return -ENXIO;

	/*
	 * Check whether it reports that it uses the generic slave
	 * capabilities; if not, it doesn't support any kind of slave
	 * capability reporting.
	 */
	if (!device->directions)
		return -ENXIO;

	caps->src_addr_widths = device->src_addr_widths;
	caps->dst_addr_widths = device->dst_addr_widths;
	caps->directions = device->directions;
	caps->min_burst = device->min_burst;
	caps->max_burst = device->max_burst;
	caps->max_sg_burst = device->max_sg_burst;
	caps->residue_granularity = device->residue_granularity;
	caps->descriptor_reuse = device->descriptor_reuse;
	caps->cmd_pause = !!device->device_pause;
	caps->cmd_resume = !!device->device_resume;
	caps->cmd_terminate = !!device->device_terminate_all;

	/*
	 * DMA engine device might be configured with non-uniformly
	 * distributed slave capabilities per device channels. In this
	 * case the corresponding driver may provide the device_caps
	 * callback to override the generic capabilities with
	 * channel-specific ones.
	 */
	if (device->device_caps)
		device->device_caps(chan, caps);

	return 0;
}
EXPORT_SYMBOL_GPL(dma_get_slave_caps);
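
/*
 * A slave client will typically query these capabilities before programming
 * a channel. A minimal sketch (error handling elided, chan assumed to be a
 * channel the caller already holds):
 *
 *	struct dma_slave_caps caps;
 *
 *	if (!dma_get_slave_caps(chan, &caps) &&
 *	    (caps.directions & BIT(DMA_DEV_TO_MEM))) {
 *		// configure a device-to-memory transfer on chan
 *	}
 */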

static struct dma_chan *private_candidate(const dma_cap_mask_t *mask,
					  struct dma_device *dev,
					  dma_filter_fn fn, void *fn_param)
{
	struct dma_chan *chan;

	if (mask && !dma_device_satisfies_mask(dev, mask)) {
		dev_dbg(dev->dev, "%s: wrong capabilities\n", __func__);
		return NULL;
	}
	/* devices with multiple channels need special handling as we need to
	 * ensure that all channels are either private or public.
	 */
	if (dev->chancnt > 1 && !dma_has_cap(DMA_PRIVATE, dev->cap_mask))
		list_for_each_entry(chan, &dev->channels, device_node) {
			/* some channels are already publicly allocated */
			if (chan->client_count)
				return NULL;
		}

	list_for_each_entry(chan, &dev->channels, device_node) {
		if (chan->client_count) {
			dev_dbg(dev->dev, "%s: %s busy\n",
				__func__, dma_chan_name(chan));
			continue;
		}
		if (fn && !fn(chan, fn_param)) {
			dev_dbg(dev->dev, "%s: %s filter said false\n",
				__func__, dma_chan_name(chan));
			continue;
		}
		return chan;
	}

	return NULL;
}

static struct dma_chan *find_candidate(struct dma_device *device,
				       const dma_cap_mask_t *mask,
				       dma_filter_fn fn, void *fn_param)
{
	struct dma_chan *chan = private_candidate(mask, device, fn, fn_param);
	int err;

	if (chan) {
		/* Found a suitable channel, try to grab, prep, and return it.
		 * We first set DMA_PRIVATE to disable balance_ref_count as this
		 * channel will not be published in the general-purpose
		 * allocator
		 */
		dma_cap_set(DMA_PRIVATE, device->cap_mask);
		device->privatecnt++;
		err = dma_chan_get(chan);

		if (err) {
			if (err == -ENODEV) {
				dev_dbg(device->dev, "%s: %s module removed\n",
					__func__, dma_chan_name(chan));
				list_del_rcu(&device->global_node);
			} else
				dev_dbg(device->dev,
					"%s: failed to get %s: (%d)\n",
					__func__, dma_chan_name(chan), err);

			if (--device->privatecnt == 0)
				dma_cap_clear(DMA_PRIVATE, device->cap_mask);

			chan = ERR_PTR(err);
		}
	}

	return chan ? chan : ERR_PTR(-EPROBE_DEFER);
}

/**
 * dma_get_slave_channel - try to get specific channel exclusively
 * @chan: target channel
 */
struct dma_chan *dma_get_slave_channel(struct dma_chan *chan)
{
	int err = -EBUSY;

	/* lock against __dma_request_channel */
	mutex_lock(&dma_list_mutex);

	if (chan->client_count == 0) {
		struct dma_device *device = chan->device;

		dma_cap_set(DMA_PRIVATE, device->cap_mask);
		device->privatecnt++;
		err = dma_chan_get(chan);
		if (err) {
			dev_dbg(chan->device->dev,
				"%s: failed to get %s: (%d)\n",
				__func__, dma_chan_name(chan), err);
			chan = NULL;
			if (--device->privatecnt == 0)
				dma_cap_clear(DMA_PRIVATE, device->cap_mask);
		}
	} else
		chan = NULL;

	mutex_unlock(&dma_list_mutex);

	return chan;
}
EXPORT_SYMBOL_GPL(dma_get_slave_channel);

struct dma_chan *dma_get_any_slave_channel(struct dma_device *device)
{
	dma_cap_mask_t mask;
	struct dma_chan *chan;

	dma_cap_zero(mask);
	dma_cap_set(DMA_SLAVE, mask);

	/* lock against __dma_request_channel */
	mutex_lock(&dma_list_mutex);

	chan = find_candidate(device, &mask, NULL, NULL);

	mutex_unlock(&dma_list_mutex);

	return IS_ERR(chan) ? NULL : chan;
}
EXPORT_SYMBOL_GPL(dma_get_any_slave_channel);

/**
 * __dma_request_channel - try to allocate an exclusive channel
 * @mask: capabilities that the channel must satisfy
 * @fn: optional callback to disposition available channels
 * @fn_param: opaque parameter to pass to dma_filter_fn()
 * @np: device node to look for DMA channels
 *
 * Returns pointer to appropriate DMA channel on success or NULL.
 */
struct dma_chan *__dma_request_channel(const dma_cap_mask_t *mask,
				       dma_filter_fn fn, void *fn_param,
				       struct device_node *np)
{
	struct dma_device *device, *_d;
	struct dma_chan *chan = NULL;

	/* Find a channel */
	mutex_lock(&dma_list_mutex);
	list_for_each_entry_safe(device, _d, &dma_device_list, global_node) {
		/* Finds a DMA controller with matching device node */
		if (np && device->dev->of_node && np != device->dev->of_node)
			continue;

		chan = find_candidate(device, mask, fn, fn_param);
		if (!IS_ERR(chan))
			break;

		chan = NULL;
	}
	mutex_unlock(&dma_list_mutex);

	pr_debug("%s: %s (%s)\n",
		 __func__,
		 chan ? "success" : "fail",
		 chan ? dma_chan_name(chan) : NULL);

	return chan;
}
EXPORT_SYMBOL_GPL(__dma_request_channel);
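
/*
 * Illustrative use of the exclusive-channel API exported above, via the
 * dma_request_channel() wrapper from <linux/dmaengine.h>. The filter
 * function and its parameter are hypothetical; pass NULL for both to accept
 * any channel matching the mask.
 *
 *	dma_cap_mask_t mask;
 *	struct dma_chan *chan;
 *
 *	dma_cap_zero(mask);
 *	dma_cap_set(DMA_MEMCPY, mask);
 *	chan = dma_request_channel(mask, my_filter_fn, my_filter_param);
 *	if (chan) {
 *		// use the channel exclusively ...
 *		dma_release_channel(chan);
 *	}
 */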

static const struct dma_slave_map *dma_filter_match(struct dma_device *device,
						    const char *name,
						    struct device *dev)
{
	int i;

	if (!device->filter.mapcnt)
		return NULL;

	for (i = 0; i < device->filter.mapcnt; i++) {
		const struct dma_slave_map *map = &device->filter.map[i];

		if (!strcmp(map->devname, dev_name(dev)) &&
		    !strcmp(map->slave, name))
			return map;
	}

	return NULL;
}
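
/*
 * dma_filter_match() walks the map a controller driver registered through
 * dma_device->filter. A rough sketch of such a table (the device names and
 * request-line values are made up for illustration):
 *
 *	static const struct dma_slave_map foo_dma_map[] = {
 *		{ "foo-uart.0", "tx", (void *)FOO_UART0_TX_REQ },
 *		{ "foo-uart.0", "rx", (void *)FOO_UART0_RX_REQ },
 *	};
 *
 * With such a map, dma_request_chan(dev, "tx") below can resolve a channel
 * even without DT or ACPI firmware descriptions.
 */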

/**
 * dma_request_chan - try to allocate an exclusive slave channel
 * @dev: pointer to client device structure
 * @name: slave channel name
 *
 * Returns pointer to appropriate DMA channel on success or an error pointer.
 */
struct dma_chan *dma_request_chan(struct device *dev, const char *name)
{
	struct dma_device *d, *_d;
	struct dma_chan *chan = NULL;

	/* If device-tree is present get slave info from here */
	if (dev->of_node)
		chan = of_dma_request_slave_channel(dev->of_node, name);

	/* If device was enumerated by ACPI get slave info from here */
	if (has_acpi_companion(dev) && !chan)
		chan = acpi_dma_request_slave_chan_by_name(dev, name);

	if (PTR_ERR(chan) == -EPROBE_DEFER)
		return chan;

	if (!IS_ERR_OR_NULL(chan))
		goto found;

	/* Try to find the channel via the DMA filter map(s) */
	mutex_lock(&dma_list_mutex);
	list_for_each_entry_safe(d, _d, &dma_device_list, global_node) {
		dma_cap_mask_t mask;
		const struct dma_slave_map *map = dma_filter_match(d, name, dev);

		if (!map)
			continue;

		dma_cap_zero(mask);
		dma_cap_set(DMA_SLAVE, mask);

		chan = find_candidate(d, &mask, d->filter.fn, map->param);
		if (!IS_ERR(chan))
			break;
	}
	mutex_unlock(&dma_list_mutex);

	if (IS_ERR(chan))
		return chan;
	if (!chan)
		return ERR_PTR(-EPROBE_DEFER);

found:
#ifdef CONFIG_DEBUG_FS
	chan->dbg_client_name = kasprintf(GFP_KERNEL, "%s:%s", dev_name(dev),
					  name);
#endif

	chan->name = kasprintf(GFP_KERNEL, "dma:%s", name);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 862) if (!chan->name)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 863) return chan;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 864) chan->slave = dev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 865)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 866) if (sysfs_create_link(&chan->dev->device.kobj, &dev->kobj,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 867) DMA_SLAVE_NAME))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 868) dev_warn(dev, "Cannot create DMA %s symlink\n", DMA_SLAVE_NAME);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 869) if (sysfs_create_link(&dev->kobj, &chan->dev->device.kobj, chan->name))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 870) dev_warn(dev, "Cannot create DMA %s symlink\n", chan->name);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 871)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 872) return chan;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 873) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 874) EXPORT_SYMBOL_GPL(dma_request_chan);
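
/*
 * Illustrative client-side usage; the "tx" channel name and the surrounding
 * probe context are assumptions, not taken from this file.  The return value
 * is an ERR_PTR() on failure and -EPROBE_DEFER must be propagated so the
 * client is reprobed once its DMA provider appears:
 *
 *	chan = dma_request_chan(&pdev->dev, "tx");
 *	if (IS_ERR(chan))
 *		return dev_err_probe(&pdev->dev, PTR_ERR(chan),
 *				     "failed to request TX channel\n");
 *	...
 *	dma_release_channel(chan);
 */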
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 875)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 876) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 877) * dma_request_chan_by_mask - allocate a channel satisfying certain capabilities
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 878) * @mask: capabilities that the channel must satisfy
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 879) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 880) * Returns pointer to appropriate DMA channel on success or an error pointer.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 881) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 882) struct dma_chan *dma_request_chan_by_mask(const dma_cap_mask_t *mask)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 883) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 884) struct dma_chan *chan;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 885)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 886) if (!mask)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 887) return ERR_PTR(-ENODEV);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 888)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 889) chan = __dma_request_channel(mask, NULL, NULL, NULL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 890) if (!chan) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 891) mutex_lock(&dma_list_mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 892) if (list_empty(&dma_device_list))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 893) chan = ERR_PTR(-EPROBE_DEFER);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 894) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 895) chan = ERR_PTR(-ENODEV);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 896) mutex_unlock(&dma_list_mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 897) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 898)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 899) return chan;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 900) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 901) EXPORT_SYMBOL_GPL(dma_request_chan_by_mask);
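
/*
 * Minimal sketch of requesting any channel that can perform memcpy; unlike
 * dma_request_chan(), failure is always reported via ERR_PTR(), never NULL:
 *
 *	dma_cap_mask_t mask;
 *	struct dma_chan *chan;
 *
 *	dma_cap_zero(mask);
 *	dma_cap_set(DMA_MEMCPY, mask);
 *	chan = dma_request_chan_by_mask(&mask);
 *	if (IS_ERR(chan))
 *		return PTR_ERR(chan);	// may be -EPROBE_DEFER or -ENODEV
 */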
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 902)
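/**
 * dma_release_channel - release a previously requested channel
 * @chan: channel obtained from one of the dma_request_*() interfaces
 */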
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 903) void dma_release_channel(struct dma_chan *chan)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 904) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 905) mutex_lock(&dma_list_mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 906) WARN_ONCE(chan->client_count != 1,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 907) "chan reference count %d != 1\n", chan->client_count);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 908) dma_chan_put(chan);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 909) /* drop PRIVATE cap enabled by __dma_request_channel() */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 910) if (--chan->device->privatecnt == 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 911) dma_cap_clear(DMA_PRIVATE, chan->device->cap_mask);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 912)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 913) if (chan->slave) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 914) sysfs_remove_link(&chan->dev->device.kobj, DMA_SLAVE_NAME);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 915) sysfs_remove_link(&chan->slave->kobj, chan->name);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 916) kfree(chan->name);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 917) chan->name = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 918) chan->slave = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 919) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 920)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 921) #ifdef CONFIG_DEBUG_FS
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 922) kfree(chan->dbg_client_name);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 923) chan->dbg_client_name = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 924) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 925) mutex_unlock(&dma_list_mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 926) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 927) EXPORT_SYMBOL_GPL(dma_release_channel);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 928)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 929) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 930) * dmaengine_get - register interest in DMA channels
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 931) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 932) void dmaengine_get(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 933) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 934) struct dma_device *device, *_d;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 935) struct dma_chan *chan;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 936) int err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 937)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 938) mutex_lock(&dma_list_mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 939) dmaengine_ref_count++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 940)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 941) /* try to grab channels */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 942) list_for_each_entry_safe(device, _d, &dma_device_list, global_node) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 943) if (dma_has_cap(DMA_PRIVATE, device->cap_mask))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 944) continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 945) list_for_each_entry(chan, &device->channels, device_node) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 946) err = dma_chan_get(chan);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 947) if (err == -ENODEV) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 948) /* module removed before we could use it */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 949) list_del_rcu(&device->global_node);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 950) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 951) } else if (err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 952) dev_dbg(chan->device->dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 953) "%s: failed to get %s: (%d)\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 954) __func__, dma_chan_name(chan), err);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 955) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 956) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 957)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 958) /* if this is the first reference and there were channels
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 959) * waiting, we need to rebalance to get those channels
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 960) * incorporated into the channel table
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 961) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 962) if (dmaengine_ref_count == 1)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 963) dma_channel_rebalance();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 964) mutex_unlock(&dma_list_mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 965) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 966) EXPORT_SYMBOL(dmaengine_get);
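
/*
 * Sketch of the opportunistic (non-exclusive) client model that pairs with
 * dmaengine_get()/dmaengine_put(), as used by async_tx style code.  Error
 * handling is trimmed and the memcpy issuing step is only hinted at:
 *
 *	dmaengine_get();
 *	chan = dma_find_channel(DMA_MEMCPY);
 *	if (chan)
 *		; // issue asynchronous memcpy descriptors on chan
 *	dmaengine_put();
 */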
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 967)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 968) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 969) * dmaengine_put - let DMA drivers be removed when ref_count == 0
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 970) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 971) void dmaengine_put(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 972) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 973) struct dma_device *device, *_d;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 974) struct dma_chan *chan;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 975)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 976) mutex_lock(&dma_list_mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 977) dmaengine_ref_count--;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 978) BUG_ON(dmaengine_ref_count < 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 979) /* drop channel references */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 980) list_for_each_entry_safe(device, _d, &dma_device_list, global_node) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 981) if (dma_has_cap(DMA_PRIVATE, device->cap_mask))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 982) continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 983) list_for_each_entry(chan, &device->channels, device_node)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 984) dma_chan_put(chan);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 985) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 986) mutex_unlock(&dma_list_mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 987) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 988) EXPORT_SYMBOL(dmaengine_put);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 989)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 990) static bool device_has_all_tx_types(struct dma_device *device)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 991) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 992) /* A device that satisfies this test has channels that will never cause
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 993) * an async_tx channel switch event as all possible operation types can
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 994) * be handled.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 995) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 996) #ifdef CONFIG_ASYNC_TX_DMA
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 997) if (!dma_has_cap(DMA_INTERRUPT, device->cap_mask))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 998) return false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 999) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1000)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1001) #if IS_ENABLED(CONFIG_ASYNC_MEMCPY)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1002) if (!dma_has_cap(DMA_MEMCPY, device->cap_mask))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1003) return false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1004) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1005)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1006) #if IS_ENABLED(CONFIG_ASYNC_XOR)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1007) if (!dma_has_cap(DMA_XOR, device->cap_mask))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1008) return false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1009)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1010) #ifndef CONFIG_ASYNC_TX_DISABLE_XOR_VAL_DMA
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1011) if (!dma_has_cap(DMA_XOR_VAL, device->cap_mask))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1012) return false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1013) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1014) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1015)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1016) #if IS_ENABLED(CONFIG_ASYNC_PQ)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1017) if (!dma_has_cap(DMA_PQ, device->cap_mask))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1018) return false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1019)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1020) #ifndef CONFIG_ASYNC_TX_DISABLE_PQ_VAL_DMA
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1021) if (!dma_has_cap(DMA_PQ_VAL, device->cap_mask))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1022) return false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1023) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1024) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1025)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1026) return true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1027) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1028)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1029) static int get_dma_id(struct dma_device *device)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1030) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1031) int rc = ida_alloc(&dma_ida, GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1032)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1033) if (rc < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1034) return rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1035) device->dev_id = rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1036) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1037) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1038)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1039) static int __dma_async_device_channel_register(struct dma_device *device,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1040) struct dma_chan *chan)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1041) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1042) int rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1043)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1044) chan->local = alloc_percpu(typeof(*chan->local));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1045) if (!chan->local)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1046) return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1047) chan->dev = kzalloc(sizeof(*chan->dev), GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1048) if (!chan->dev) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1049) rc = -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1050) goto err_free_local;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1051) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1052)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1053) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1054) * When the chan_id is a negative value, we are dynamically adding
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1055) * the channel. Otherwise, we are enumerating statically.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1056) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1057) mutex_lock(&device->chan_mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1058) chan->chan_id = ida_alloc(&device->chan_ida, GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1059) mutex_unlock(&device->chan_mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1060) if (chan->chan_id < 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1061) pr_err("%s: unable to alloc ida for chan: %d\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1062) __func__, chan->chan_id);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1063) rc = chan->chan_id;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1064) goto err_free_dev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1065) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1066)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1067) chan->dev->device.class = &dma_devclass;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1068) chan->dev->device.parent = device->dev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1069) chan->dev->chan = chan;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1070) chan->dev->dev_id = device->dev_id;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1071) dev_set_name(&chan->dev->device, "dma%dchan%d",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1072) device->dev_id, chan->chan_id);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1073) rc = device_register(&chan->dev->device);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1074) if (rc)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1075) goto err_out_ida;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1076) chan->client_count = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1077) device->chancnt++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1078)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1079) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1080)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1081) err_out_ida:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1082) mutex_lock(&device->chan_mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1083) ida_free(&device->chan_ida, chan->chan_id);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1084) mutex_unlock(&device->chan_mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1085) err_free_dev:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1086) kfree(chan->dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1087) err_free_local:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1088) free_percpu(chan->local);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1089) chan->local = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1090) return rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1091) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1092)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1093) int dma_async_device_channel_register(struct dma_device *device,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1094) struct dma_chan *chan)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1095) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1096) int rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1097)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1098) rc = __dma_async_device_channel_register(device, chan);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1099) if (rc < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1100) return rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1101)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1102) dma_channel_rebalance();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1103) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1104) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1105) EXPORT_SYMBOL_GPL(dma_async_device_channel_register);
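
/*
 * Hedged provider-side sketch of dynamic channel registration (the variable
 * names are hypothetical): the channel is expected to be wired to its
 * dma_device and placed on the device's channel list before being handed to
 * the core.
 *
 *	chan->device = dma_dev;
 *	list_add_tail(&chan->device_node, &dma_dev->channels);
 *	rc = dma_async_device_channel_register(dma_dev, chan);
 *	if (rc) {
 *		list_del(&chan->device_node);
 *		return rc;
 *	}
 */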
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1106)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1107) static void __dma_async_device_channel_unregister(struct dma_device *device,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1108) struct dma_chan *chan)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1109) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1110) WARN_ONCE(!device->device_release && chan->client_count,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1111) "%s called while %d clients hold a reference\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1112) __func__, chan->client_count);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1113) mutex_lock(&dma_list_mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1114) device->chancnt--;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1115) chan->dev->chan = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1116) mutex_unlock(&dma_list_mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1117) mutex_lock(&device->chan_mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1118) ida_free(&device->chan_ida, chan->chan_id);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1119) mutex_unlock(&device->chan_mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1120) device_unregister(&chan->dev->device);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1121) free_percpu(chan->local);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1122) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1123)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1124) void dma_async_device_channel_unregister(struct dma_device *device,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1125) struct dma_chan *chan)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1126) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1127) __dma_async_device_channel_unregister(device, chan);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1128) dma_channel_rebalance();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1129) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1130) EXPORT_SYMBOL_GPL(dma_async_device_channel_unregister);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1131)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1132) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1133) * dma_async_device_register - registers DMA devices found
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1134) * @device: pointer to &struct dma_device
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1135) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1136) * After calling this routine the structure should not be freed except in the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1137) * device_release() callback which will be called after
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1138) * dma_async_device_unregister() is called and no further references are taken.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1139) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1140) int dma_async_device_register(struct dma_device *device)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1141) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1142) int rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1143) struct dma_chan *chan;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1144)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1145) if (!device)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1146) return -ENODEV;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1147)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1148) /* validate device routines */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1149) if (!device->dev) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1150) pr_err("DMA device must have dev\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1151) return -EIO;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1152) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1153)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1154) device->owner = device->dev->driver->owner;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1155)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1156) if (dma_has_cap(DMA_MEMCPY, device->cap_mask) && !device->device_prep_dma_memcpy) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1157) dev_err(device->dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1158) "Device claims capability %s, but op is not defined\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1159) "DMA_MEMCPY");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1160) return -EIO;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1161) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1162)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1163) if (dma_has_cap(DMA_XOR, device->cap_mask) && !device->device_prep_dma_xor) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1164) dev_err(device->dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1165) "Device claims capability %s, but op is not defined\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1166) "DMA_XOR");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1167) return -EIO;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1168) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1169)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1170) if (dma_has_cap(DMA_XOR_VAL, device->cap_mask) && !device->device_prep_dma_xor_val) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1171) dev_err(device->dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1172) "Device claims capability %s, but op is not defined\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1173) "DMA_XOR_VAL");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1174) return -EIO;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1175) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1176)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1177) if (dma_has_cap(DMA_PQ, device->cap_mask) && !device->device_prep_dma_pq) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1178) dev_err(device->dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1179) "Device claims capability %s, but op is not defined\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1180) "DMA_PQ");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1181) return -EIO;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1182) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1183)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1184) if (dma_has_cap(DMA_PQ_VAL, device->cap_mask) && !device->device_prep_dma_pq_val) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1185) dev_err(device->dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1186) "Device claims capability %s, but op is not defined\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1187) "DMA_PQ_VAL");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1188) return -EIO;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1189) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1190)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1191) if (dma_has_cap(DMA_MEMSET, device->cap_mask) && !device->device_prep_dma_memset) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1192) dev_err(device->dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1193) "Device claims capability %s, but op is not defined\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1194) "DMA_MEMSET");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1195) return -EIO;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1196) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1197)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1198) if (dma_has_cap(DMA_INTERRUPT, device->cap_mask) && !device->device_prep_dma_interrupt) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1199) dev_err(device->dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1200) "Device claims capability %s, but op is not defined\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1201) "DMA_INTERRUPT");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1202) return -EIO;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1203) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1204)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1205) if (dma_has_cap(DMA_CYCLIC, device->cap_mask) && !device->device_prep_dma_cyclic) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1206) dev_err(device->dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1207) "Device claims capability %s, but op is not defined\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1208) "DMA_CYCLIC");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1209) return -EIO;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1210) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1211)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1212) if (dma_has_cap(DMA_INTERLEAVE, device->cap_mask) && !device->device_prep_interleaved_dma) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1213) dev_err(device->dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1214) "Device claims capability %s, but op is not defined\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1215) "DMA_INTERLEAVE");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1216) return -EIO;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1217) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1218)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1220) if (!device->device_tx_status) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1221) dev_err(device->dev, "Device tx_status is not defined\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1222) return -EIO;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1223) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1224)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1226) if (!device->device_issue_pending) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1227) dev_err(device->dev, "Device issue_pending is not defined\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1228) return -EIO;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1229) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1230)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1231) if (!device->device_release)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1232) dev_dbg(device->dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1233) "WARN: Device release is not defined so it is not safe to unbind this driver while in use\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1234)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1235) kref_init(&device->ref);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1236)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1237) /* note: this only matters in the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1238) * CONFIG_ASYNC_TX_ENABLE_CHANNEL_SWITCH=n case
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1239) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1240) if (device_has_all_tx_types(device))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1241) dma_cap_set(DMA_ASYNC_TX, device->cap_mask);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1242)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1243) rc = get_dma_id(device);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1244) if (rc != 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1245) return rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1246)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1247) mutex_init(&device->chan_mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1248) ida_init(&device->chan_ida);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1249)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1250) /* represent channels in sysfs. Probably want devs too */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1251) list_for_each_entry(chan, &device->channels, device_node) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1252) rc = __dma_async_device_channel_register(device, chan);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1253) if (rc < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1254) goto err_out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1255) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1256)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1257) mutex_lock(&dma_list_mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1258) /* take references on public channels */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1259) if (dmaengine_ref_count && !dma_has_cap(DMA_PRIVATE, device->cap_mask))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1260) list_for_each_entry(chan, &device->channels, device_node) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1261) /* if clients are already waiting for channels we need
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1262) * to take references on their behalf
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1263) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1264) if (dma_chan_get(chan) == -ENODEV) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1265) /* note we can only get here for the first
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1266) * channel as the remaining channels are
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1267) * guaranteed to get a reference
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1268) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1269) rc = -ENODEV;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1270) mutex_unlock(&dma_list_mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1271) goto err_out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1272) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1273) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1274) list_add_tail_rcu(&device->global_node, &dma_device_list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1275) if (dma_has_cap(DMA_PRIVATE, device->cap_mask))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1276) device->privatecnt++; /* Always private */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1277) dma_channel_rebalance();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1278) mutex_unlock(&dma_list_mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1279)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1280) dmaengine_debug_register(device);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1281)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1282) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1283)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1284) err_out:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1285) /* if we never registered a channel just release the ida */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1286) if (!device->chancnt) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1287) ida_free(&dma_ida, device->dev_id);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1288) return rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1289) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1290)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1291) list_for_each_entry(chan, &device->channels, device_node) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1292) if (chan->local == NULL)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1293) continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1294) mutex_lock(&dma_list_mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1295) chan->dev->chan = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1296) mutex_unlock(&dma_list_mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1297) device_unregister(&chan->dev->device);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1298) free_percpu(chan->local);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1299) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1300) return rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1301) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1302) EXPORT_SYMBOL(dma_async_device_register);
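
/*
 * Minimal provider-side sketch, assuming a hypothetical "foo" driver: the
 * dma_device must advertise at least one capability, supply the matching
 * device_prep_* callback plus device_tx_status() and device_issue_pending(),
 * and have its channels linked on ->channels before registration.
 *
 *	dma_cap_set(DMA_MEMCPY, foo->dma.cap_mask);
 *	foo->dma.dev = &pdev->dev;
 *	foo->dma.device_prep_dma_memcpy = foo_prep_memcpy;
 *	foo->dma.device_tx_status = foo_tx_status;
 *	foo->dma.device_issue_pending = foo_issue_pending;
 *	foo->dma.device_release = foo_dma_release;
 *	INIT_LIST_HEAD(&foo->dma.channels);
 *	foo->chan.device = &foo->dma;
 *	list_add_tail(&foo->chan.device_node, &foo->dma.channels);
 *	rc = dma_async_device_register(&foo->dma);
 */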
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1303)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1304) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1305) * dma_async_device_unregister - unregister a DMA device
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1306) * @device: pointer to &struct dma_device
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1307) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1308) * This routine is called by DMA driver exit routines; dmaengine holds module
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1309) * references to prevent it being called while channels are in use.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1310) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1311) void dma_async_device_unregister(struct dma_device *device)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1312) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1313) struct dma_chan *chan, *n;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1314)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1315) dmaengine_debug_unregister(device);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1316)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1317) list_for_each_entry_safe(chan, n, &device->channels, device_node)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1318) __dma_async_device_channel_unregister(device, chan);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1319)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1320) mutex_lock(&dma_list_mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1321) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1322) * setting DMA_PRIVATE ensures the device being torn down will not
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1323) * be used in the channel_table
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1324) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1325) dma_cap_set(DMA_PRIVATE, device->cap_mask);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1326) dma_channel_rebalance();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1327) ida_free(&dma_ida, device->dev_id);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1328) dma_device_put(device);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1329) mutex_unlock(&dma_list_mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1330) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1331) EXPORT_SYMBOL(dma_async_device_unregister);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1332)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1333) static void dmam_device_release(struct device *dev, void *res)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1334) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1335) struct dma_device *device;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1336)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1337) device = *(struct dma_device **)res;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1338) dma_async_device_unregister(device);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1339) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1340)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1341) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1342) * dmaenginem_async_device_register - registers DMA devices found
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1343) * @device: pointer to &struct dma_device
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1344) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1345) * The operation is managed and will be undone on driver detach.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1346) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1347) int dmaenginem_async_device_register(struct dma_device *device)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1348) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1349) void *p;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1350) int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1351)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1352) p = devres_alloc(dmam_device_release, sizeof(void *), GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1353) if (!p)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1354) return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1355)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1356) ret = dma_async_device_register(device);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1357) if (!ret) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1358) *(struct dma_device **)p = device;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1359) devres_add(device->dev, p);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1360) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1361) devres_free(p);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1362) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1363)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1364) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1365) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1366) EXPORT_SYMBOL(dmaenginem_async_device_register);
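
/*
 * Usage note (sketch): calling the managed variant from a provider's probe
 * path removes the need for an explicit dma_async_device_unregister() in its
 * remove path.  The "foo" names are illustrative:
 *
 *	ret = dmaenginem_async_device_register(&foo->dma);
 *	if (ret)
 *		return ret;
 *	// unregistration now happens automatically on driver detach
 */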
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1367)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1368) struct dmaengine_unmap_pool {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1369) struct kmem_cache *cache;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1370) const char *name;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1371) mempool_t *pool;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1372) size_t size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1373) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1374)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1375) #define __UNMAP_POOL(x) { .size = x, .name = "dmaengine-unmap-" __stringify(x) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1376) static struct dmaengine_unmap_pool unmap_pool[] = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1377) __UNMAP_POOL(2),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1378) #if IS_ENABLED(CONFIG_DMA_ENGINE_RAID)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1379) __UNMAP_POOL(16),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1380) __UNMAP_POOL(128),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1381) __UNMAP_POOL(256),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1382) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1383) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1384)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1385) static struct dmaengine_unmap_pool *__get_unmap_pool(int nr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1386) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1387) int order = get_count_order(nr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1388)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1389) switch (order) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1390) case 0 ... 1:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1391) return &unmap_pool[0];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1392) #if IS_ENABLED(CONFIG_DMA_ENGINE_RAID)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1393) case 2 ... 4:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1394) return &unmap_pool[1];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1395) case 5 ... 7:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1396) return &unmap_pool[2];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1397) case 8:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1398) return &unmap_pool[3];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1399) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1400) default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1401) BUG();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1402) return NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1403) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1404) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1405)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1406) static void dmaengine_unmap(struct kref *kref)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1407) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1408) struct dmaengine_unmap_data *unmap = container_of(kref, typeof(*unmap), kref);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1409) struct device *dev = unmap->dev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1410) int cnt, i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1411)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1412) cnt = unmap->to_cnt;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1413) for (i = 0; i < cnt; i++)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1414) dma_unmap_page(dev, unmap->addr[i], unmap->len,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1415) DMA_TO_DEVICE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1416) cnt += unmap->from_cnt;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1417) for (; i < cnt; i++)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1418) dma_unmap_page(dev, unmap->addr[i], unmap->len,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1419) DMA_FROM_DEVICE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1420) cnt += unmap->bidi_cnt;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1421) for (; i < cnt; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1422) if (unmap->addr[i] == 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1423) continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1424) dma_unmap_page(dev, unmap->addr[i], unmap->len,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1425) DMA_BIDIRECTIONAL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1426) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1427) cnt = unmap->map_cnt;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1428) mempool_free(unmap, __get_unmap_pool(cnt)->pool);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1429) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1430)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1431) void dmaengine_unmap_put(struct dmaengine_unmap_data *unmap)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1432) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1433) if (unmap)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1434) kref_put(&unmap->kref, dmaengine_unmap);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1435) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1436) EXPORT_SYMBOL_GPL(dmaengine_unmap_put);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1437)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1438) static void dmaengine_destroy_unmap_pool(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1439) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1440) int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1441)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1442) for (i = 0; i < ARRAY_SIZE(unmap_pool); i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1443) struct dmaengine_unmap_pool *p = &unmap_pool[i];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1444)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1445) mempool_destroy(p->pool);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1446) p->pool = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1447) kmem_cache_destroy(p->cache);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1448) p->cache = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1449) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1450) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1451)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1452) static int __init dmaengine_init_unmap_pool(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1453) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1454) int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1455)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1456) for (i = 0; i < ARRAY_SIZE(unmap_pool); i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1457) struct dmaengine_unmap_pool *p = &unmap_pool[i];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1458) size_t size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1459)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1460) size = sizeof(struct dmaengine_unmap_data) +
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1461) sizeof(dma_addr_t) * p->size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1462)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1463) p->cache = kmem_cache_create(p->name, size, 0,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1464) SLAB_HWCACHE_ALIGN, NULL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1465) if (!p->cache)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1466) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1467) p->pool = mempool_create_slab_pool(1, p->cache);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1468) if (!p->pool)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1469) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1470) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1471)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1472) if (i == ARRAY_SIZE(unmap_pool))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1473) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1474)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1475) dmaengine_destroy_unmap_pool();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1476) return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1477) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1478)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1479) struct dmaengine_unmap_data *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1480) dmaengine_get_unmap_data(struct device *dev, int nr, gfp_t flags)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1481) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1482) struct dmaengine_unmap_data *unmap;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1483)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1484) unmap = mempool_alloc(__get_unmap_pool(nr)->pool, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1485) if (!unmap)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1486) return NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1487)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1488) memset(unmap, 0, sizeof(*unmap));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1489) kref_init(&unmap->kref);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1490) unmap->dev = dev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1491) unmap->map_cnt = nr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1492)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1493) return unmap;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1494) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1495) EXPORT_SYMBOL(dmaengine_get_unmap_data);
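
/*
 * Illustrative lifecycle of the unmap data used by offload clients such as
 * async_tx; the buffer names are hypothetical and dma_mapping_error()
 * checking is omitted.  Slots [0, to_cnt) are unmapped DMA_TO_DEVICE, the
 * next from_cnt slots DMA_FROM_DEVICE and the final bidi_cnt slots
 * DMA_BIDIRECTIONAL, all using the shared ->len (see dmaengine_unmap()):
 *
 *	unmap = dmaengine_get_unmap_data(dev, 2, GFP_NOWAIT);
 *	if (!unmap)
 *		return -ENOMEM;
 *	unmap->len = len;
 *	unmap->addr[0] = dma_map_page(dev, src_page, src_off, len,
 *				      DMA_TO_DEVICE);
 *	unmap->to_cnt = 1;
 *	unmap->addr[1] = dma_map_page(dev, dst_page, dst_off, len,
 *				      DMA_FROM_DEVICE);
 *	unmap->from_cnt = 1;
 *	...
 *	dmaengine_unmap_put(unmap);	// last reference drop performs the unmaps
 */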
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1496)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1497) void dma_async_tx_descriptor_init(struct dma_async_tx_descriptor *tx,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1498) struct dma_chan *chan)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1499) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1500) tx->chan = chan;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1501) #ifdef CONFIG_ASYNC_TX_ENABLE_CHANNEL_SWITCH
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1502) spin_lock_init(&tx->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1503) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1504) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1505) EXPORT_SYMBOL(dma_async_tx_descriptor_init);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1506)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1507) static inline int desc_check_and_set_metadata_mode(
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1508) struct dma_async_tx_descriptor *desc, enum dma_desc_metadata_mode mode)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1509) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1510) /* Make sure that the metadata mode is not mixed */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1511) if (!desc->desc_metadata_mode) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1512) if (dmaengine_is_metadata_mode_supported(desc->chan, mode))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1513) desc->desc_metadata_mode = mode;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1514) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1515) return -ENOTSUPP;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1516) } else if (desc->desc_metadata_mode != mode) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1517) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1518) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1519)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1520) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1521) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1522)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1523) int dmaengine_desc_attach_metadata(struct dma_async_tx_descriptor *desc,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1524) void *data, size_t len)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1525) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1526) int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1527)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1528) if (!desc)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1529) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1530)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1531) ret = desc_check_and_set_metadata_mode(desc, DESC_METADATA_CLIENT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1532) if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1533) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1534)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1535) if (!desc->metadata_ops || !desc->metadata_ops->attach)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1536) return -ENOTSUPP;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1537)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1538) return desc->metadata_ops->attach(desc, data, len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1539) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1540) EXPORT_SYMBOL_GPL(dmaengine_desc_attach_metadata);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1541)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1542) void *dmaengine_desc_get_metadata_ptr(struct dma_async_tx_descriptor *desc,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1543) size_t *payload_len, size_t *max_len)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1544) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1545) int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1546)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1547) if (!desc)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1548) return ERR_PTR(-EINVAL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1549)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1550) ret = desc_check_and_set_metadata_mode(desc, DESC_METADATA_ENGINE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1551) if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1552) return ERR_PTR(ret);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1553)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1554) if (!desc->metadata_ops || !desc->metadata_ops->get_ptr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1555) return ERR_PTR(-ENOTSUPP);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1556)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1557) return desc->metadata_ops->get_ptr(desc, payload_len, max_len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1558) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1559) EXPORT_SYMBOL_GPL(dmaengine_desc_get_metadata_ptr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1560)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1561) int dmaengine_desc_set_metadata_len(struct dma_async_tx_descriptor *desc,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1562) size_t payload_len)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1563) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1564) int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1565)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1566) if (!desc)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1567) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1568)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1569) ret = desc_check_and_set_metadata_mode(desc, DESC_METADATA_ENGINE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1570) if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1571) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1572)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1573) if (!desc->metadata_ops || !desc->metadata_ops->set_len)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1574) return -ENOTSUPP;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1575)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1576) return desc->metadata_ops->set_len(desc, payload_len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1577) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1578) EXPORT_SYMBOL_GPL(dmaengine_desc_set_metadata_len);
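
/*
 * Hedged sketch of the DESC_METADATA_ENGINE flow from the client side (the
 * descriptor and length variables are illustrative): the client asks the
 * provider for its metadata buffer, fills it, then reports how many bytes it
 * wrote before submitting the descriptor.
 *
 *	ptr = dmaengine_desc_get_metadata_ptr(desc, &payload_len, &max_len);
 *	if (IS_ERR(ptr))
 *		return PTR_ERR(ptr);
 *	len = min(sizeof(my_metadata), max_len);
 *	memcpy(ptr, &my_metadata, len);
 *	ret = dmaengine_desc_set_metadata_len(desc, len);
 */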
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1579)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1580) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1581) * dma_wait_for_async_tx - spin wait for a transaction to complete
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1582) * @tx: in-flight transaction to wait on
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1583) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1584) enum dma_status
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1585) dma_wait_for_async_tx(struct dma_async_tx_descriptor *tx)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1586) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1587) unsigned long dma_sync_wait_timeout = jiffies + msecs_to_jiffies(5000);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1588)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1589) if (!tx)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1590) return DMA_COMPLETE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1591)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1592) while (tx->cookie == -EBUSY) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1593) if (time_after_eq(jiffies, dma_sync_wait_timeout)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1594) dev_err(tx->chan->device->dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1595) "%s timeout waiting for descriptor submission\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1596) __func__);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1597) return DMA_ERROR;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1598) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1599) cpu_relax();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1600) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1601) return dma_sync_wait(tx->chan, tx->cookie);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1602) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1603) EXPORT_SYMBOL_GPL(dma_wait_for_async_tx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1604)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1605) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1606) * dma_run_dependencies - process dependent operations on the target channel
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1607) * @tx: transaction with dependencies
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1608) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1609) * Helper routine for DMA drivers to process (start) dependent operations
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1610) * on their target channel.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1611) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1612) void dma_run_dependencies(struct dma_async_tx_descriptor *tx)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1613) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1614) struct dma_async_tx_descriptor *dep = txd_next(tx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1615) struct dma_async_tx_descriptor *dep_next;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1616) struct dma_chan *chan;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1617)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1618) if (!dep)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1619) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1620)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1621) /* we'll submit tx->next now, so clear the link */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1622) txd_clear_next(tx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1623) chan = dep->chan;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1624)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1625) /* keep submitting up until a channel switch is detected;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1626) * in that case we will be called again as a result of
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1627) * processing the interrupt from async_tx_channel_switch
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1628) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1629) for (; dep; dep = dep_next) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1630) txd_lock(dep);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1631) txd_clear_parent(dep);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1632) dep_next = txd_next(dep);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1633) if (dep_next && dep_next->chan == chan)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1634) txd_clear_next(dep); /* ->next will be submitted */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1635) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1636) dep_next = NULL; /* submit current dep and terminate */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1637) txd_unlock(dep);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1638)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1639) dep->tx_submit(dep);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1640) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1641)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1642) chan->device->device_issue_pending(chan);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1643) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1644) EXPORT_SYMBOL_GPL(dma_run_dependencies);
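
/*
 * Typical call site (sketch): a provider invokes this from its descriptor
 * completion path, after completing the cookie, so that any async_tx
 * operations chained behind the finished transaction are started.  The
 * descriptor layout below is hypothetical.
 *
 *	dma_cookie_complete(&desc->txd);
 *	dma_descriptor_unmap(&desc->txd);
 *	dmaengine_desc_get_callback_invoke(&desc->txd, NULL);
 *	dma_run_dependencies(&desc->txd);
 */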
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1645)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1646) static int __init dma_bus_init(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1647) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1648) int err = dmaengine_init_unmap_pool();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1649)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1650) if (err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1651) return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1652)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1653) err = class_register(&dma_devclass);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1654) if (!err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1655) dmaengine_debugfs_init();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1656)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1657) return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1658) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1659) arch_initcall(dma_bus_init);