// SPDX-License-Identifier: GPL-2.0
/*
 * linux/kernel/dma.c: A DMA channel allocator. Inspired by linux/kernel/irq.c.
 *
 * Written by Hennus Bergman, 1992.
 *
 * 1994/12/26: Changes by Alex Nash to fix a minor bug in /proc/dma.
 *   In the previous version the reported device could end up being wrong,
 *   if a device requested a DMA channel that was already in use.
 *   [It also happened to remove the sizeof(char *) == sizeof(int)
 *   assumption introduced because of those /proc/dma patches. -- Hennus]
 */
#include <linux/export.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/spinlock.h>
#include <linux/string.h>
#include <linux/seq_file.h>
#include <linux/proc_fs.h>
#include <linux/init.h>
#include <asm/dma.h>


/* A note on resource allocation:
 *
 * All drivers needing DMA channels should allocate and release them
 * through the public routines `request_dma()' and `free_dma()'.
 *
 * In order to avoid problems, all processes should allocate resources in
 * the same sequence and release them in the reverse order.
 *
 * So, when allocating DMAs and IRQs, first allocate the IRQ, then the DMA.
 * When releasing them, first release the DMA, then release the IRQ.
 * If you don't, you may cause allocation requests to fail unnecessarily.
 * This doesn't really matter now, but it will once we get real semaphores
 * in the kernel.
 */
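
/*
 * Illustrative sketch of the ordering described above (not part of this
 * file's API): a driver's setup path requests the IRQ before the DMA
 * channel and tears them down in reverse order.  The irq/dmanr values,
 * my_handler() and the "mydev" name are hypothetical.
 *
 *	err = request_irq(irq, my_handler, 0, "mydev", dev);
 *	if (err)
 *		return err;
 *	err = request_dma(dmanr, "mydev");
 *	if (err) {
 *		free_irq(irq, dev);
 *		return err;
 *	}
 *
 * and on teardown, in reverse order:
 *
 *	free_dma(dmanr);
 *	free_irq(irq, dev);
 */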


DEFINE_SPINLOCK(dma_spin_lock);
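
/*
 * Illustrative sketch only: ports that provide the ISA DMA helpers in
 * <asm/dma.h> serialize controller programming with this lock through
 * claim_dma_lock()/release_dma_lock().  A typical transfer setup, with
 * dmanr, phys_addr and count standing in for driver-specific values,
 * looks roughly like:
 *
 *	unsigned long flags = claim_dma_lock();
 *	disable_dma(dmanr);
 *	clear_dma_ff(dmanr);
 *	set_dma_mode(dmanr, DMA_MODE_READ);
 *	set_dma_addr(dmanr, phys_addr);
 *	set_dma_count(dmanr, count);
 *	enable_dma(dmanr);
 *	release_dma_lock(flags);
 */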

/*
 * If our port doesn't define MAX_DMA_CHANNELS, it has no PC-like (ISA) DMA;
 * request_dma()/free_dma() are then stubbed out below.
 */

#ifdef MAX_DMA_CHANNELS


/* Channel n is busy iff dma_chan_busy[n].lock != 0.
 * DMA0 used to be reserved for DRAM refresh, but apparently not any more...
 * DMA4 is reserved for cascading.
 */

struct dma_chan {
	int lock;
	const char *device_id;
};

static struct dma_chan dma_chan_busy[MAX_DMA_CHANNELS] = {
	[4] = { 1, "cascade" },
};


/**
 * request_dma - request and reserve a system DMA channel
 * @dmanr: DMA channel number
 * @device_id: reserving device ID string, used in /proc/dma
 */
int request_dma(unsigned int dmanr, const char *device_id)
{
	if (dmanr >= MAX_DMA_CHANNELS)
		return -EINVAL;

	if (xchg(&dma_chan_busy[dmanr].lock, 1) != 0)
		return -EBUSY;

	dma_chan_busy[dmanr].device_id = device_id;

	/* old flag was 0, now contains 1 to indicate busy */
	return 0;
} /* request_dma */

/**
 * free_dma - free a reserved system DMA channel
 * @dmanr: DMA channel number
 */
void free_dma(unsigned int dmanr)
{
	if (dmanr >= MAX_DMA_CHANNELS) {
		printk(KERN_WARNING "Trying to free DMA%d\n", dmanr);
		return;
	}

	if (xchg(&dma_chan_busy[dmanr].lock, 0) == 0) {
		printk(KERN_WARNING "Trying to free free DMA%d\n", dmanr);
		return;
	}

} /* free_dma */

#else

int request_dma(unsigned int dmanr, const char *device_id)
{
	return -EINVAL;
}

void free_dma(unsigned int dmanr)
{
}

#endif

#ifdef CONFIG_PROC_FS

#ifdef MAX_DMA_CHANNELS
static int proc_dma_show(struct seq_file *m, void *v)
{
	int i;

	for (i = 0; i < MAX_DMA_CHANNELS; i++) {
		if (dma_chan_busy[i].lock) {
			seq_printf(m, "%2d: %s\n", i,
				   dma_chan_busy[i].device_id);
		}
	}
	return 0;
}
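
/*
 * For illustration only: with the cascade channel reserved above and a
 * hypothetical driver holding channel 2 under the name "floppy",
 * /proc/dma would read something like:
 *
 *	 2: floppy
 *	 4: cascade
 */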
#else
static int proc_dma_show(struct seq_file *m, void *v)
{
	seq_puts(m, "No DMA\n");
	return 0;
}
#endif /* MAX_DMA_CHANNELS */

static int __init proc_dma_init(void)
{
	proc_create_single("dma", 0, NULL, proc_dma_show);
	return 0;
}

__initcall(proc_dma_init);
#endif

EXPORT_SYMBOL(request_dma);
EXPORT_SYMBOL(free_dma);
EXPORT_SYMBOL(dma_spin_lock);