// SPDX-License-Identifier: GPL-2.0-only
/*
 * drivers/uio/uio_dmem_genirq.c
 *
 * Userspace I/O platform driver with generic IRQ handling code.
 *
 * Copyright (C) 2012 Damian Hobson-Garcia
 *
 * Based on uio_pdrv_genirq.c by Magnus Damm
 */

#include <linux/platform_device.h>
#include <linux/uio_driver.h>
#include <linux/spinlock.h>
#include <linux/bitops.h>
#include <linux/module.h>
#include <linux/interrupt.h>
#include <linux/platform_data/uio_dmem_genirq.h>
#include <linux/stringify.h>
#include <linux/pm_runtime.h>
#include <linux/dma-mapping.h>
#include <linux/slab.h>
#include <linux/irq.h>

#include <linux/of.h>
#include <linux/of_platform.h>
#include <linux/of_address.h>

#define DRIVER_NAME "uio_dmem_genirq"
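/* Stored in a map's ->addr while its dynamic region has no DMA allocation */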
#define DMEM_MAP_ERROR (~0)

struct uio_dmem_genirq_platdata {
	struct uio_info *uioinfo;
	spinlock_t lock;
	unsigned long flags;			/* bit 0: irq is masked */
	struct platform_device *pdev;
	unsigned int dmem_region_start;		/* first dynamic entry in mem[] */
	unsigned int num_dmem_regions;
	void *dmem_region_vaddr[MAX_UIO_MAPS];
	struct mutex alloc_lock;		/* protects refcnt and regions */
	unsigned int refcnt;			/* number of open()s */
};

static int uio_dmem_genirq_open(struct uio_info *info, struct inode *inode)
{
	struct uio_dmem_genirq_platdata *priv = info->priv;
	struct uio_mem *uiomem;
	int dmem_region = priv->dmem_region_start;

	uiomem = &priv->uioinfo->mem[priv->dmem_region_start];

	mutex_lock(&priv->alloc_lock);
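	/* Allocate the dynamic regions on first open only; later openers
	 * share the same buffers until the last release() frees them.
	 */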
	while (!priv->refcnt && uiomem < &priv->uioinfo->mem[MAX_UIO_MAPS]) {
		void *addr;

		if (!uiomem->size)
			break;

		addr = dma_alloc_coherent(&priv->pdev->dev, uiomem->size,
					  (dma_addr_t *)&uiomem->addr,
					  GFP_KERNEL);
		if (!addr)
			uiomem->addr = DMEM_MAP_ERROR;
		priv->dmem_region_vaddr[dmem_region++] = addr;
		++uiomem;
	}
	priv->refcnt++;

	mutex_unlock(&priv->alloc_lock);
	/* Wait until the Runtime PM code has woken up the device */
	pm_runtime_get_sync(&priv->pdev->dev);
	return 0;
}

static int uio_dmem_genirq_release(struct uio_info *info, struct inode *inode)
{
	struct uio_dmem_genirq_platdata *priv = info->priv;
	struct uio_mem *uiomem;
	int dmem_region = priv->dmem_region_start;

	/* Tell the Runtime PM code that the device has become idle */
	pm_runtime_put_sync(&priv->pdev->dev);

	uiomem = &priv->uioinfo->mem[priv->dmem_region_start];

	mutex_lock(&priv->alloc_lock);

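	/* Drop our reference; the dynamic regions are freed only when the
	 * last user has closed the device.
	 */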
	priv->refcnt--;
	while (!priv->refcnt && uiomem < &priv->uioinfo->mem[MAX_UIO_MAPS]) {
		if (!uiomem->size)
			break;
		if (priv->dmem_region_vaddr[dmem_region]) {
			dma_free_coherent(&priv->pdev->dev, uiomem->size,
					  priv->dmem_region_vaddr[dmem_region],
					  uiomem->addr);
		}
		uiomem->addr = DMEM_MAP_ERROR;
		++dmem_region;
		++uiomem;
	}

	mutex_unlock(&priv->alloc_lock);
	return 0;
}

static irqreturn_t uio_dmem_genirq_handler(int irq, struct uio_info *dev_info)
{
	struct uio_dmem_genirq_platdata *priv = dev_info->priv;

	/* Just disable the interrupt in the interrupt controller, and
	 * remember the state so we can allow user space to enable it later.
	 */

	if (!test_and_set_bit(0, &priv->flags))
		disable_irq_nosync(irq);

	return IRQ_HANDLED;
}

static int uio_dmem_genirq_irqcontrol(struct uio_info *dev_info, s32 irq_on)
{
	struct uio_dmem_genirq_platdata *priv = dev_info->priv;
	unsigned long flags;

	/* Allow user space to enable and disable the interrupt
	 * in the interrupt controller, but keep track of the
	 * state to prevent per-irq depth damage.
	 *
	 * Serialize this operation to support multiple tasks and
	 * concurrency with the irq handler on SMP systems.
	 */

	spin_lock_irqsave(&priv->lock, flags);
	if (irq_on) {
		if (test_and_clear_bit(0, &priv->flags))
			enable_irq(dev_info->irq);
	} else {
		if (!test_and_set_bit(0, &priv->flags))
			disable_irq_nosync(dev_info->irq);
	}
	spin_unlock_irqrestore(&priv->lock, flags);

	return 0;
}

static int uio_dmem_genirq_probe(struct platform_device *pdev)
{
	struct uio_dmem_genirq_pdata *pdata = dev_get_platdata(&pdev->dev);
	struct uio_info *uioinfo = &pdata->uioinfo;
	struct uio_dmem_genirq_platdata *priv;
	struct uio_mem *uiomem;
	int ret = -EINVAL;
	int i;

	if (pdev->dev.of_node) {
		/* alloc uioinfo for one device */
		uioinfo = kzalloc(sizeof(*uioinfo), GFP_KERNEL);
		if (!uioinfo) {
			ret = -ENOMEM;
			dev_err(&pdev->dev, "unable to kmalloc\n");
			goto bad2;
		}
		uioinfo->name = devm_kasprintf(&pdev->dev, GFP_KERNEL, "%pOFn",
					       pdev->dev.of_node);
		uioinfo->version = "devicetree";
	}

	if (!uioinfo || !uioinfo->name || !uioinfo->version) {
		dev_err(&pdev->dev, "missing platform_data\n");
		goto bad0;
	}

	if (uioinfo->handler || uioinfo->irqcontrol ||
	    uioinfo->irq_flags & IRQF_SHARED) {
		dev_err(&pdev->dev, "interrupt configuration error\n");
		goto bad0;
	}

	priv = kzalloc(sizeof(*priv), GFP_KERNEL);
	if (!priv) {
		ret = -ENOMEM;
		dev_err(&pdev->dev, "unable to kmalloc\n");
		goto bad0;
	}

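	/* Dynamic regions are handed out by dma_alloc_coherent(), so a
	 * coherent DMA mask has to be in place before the first open().
	 */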
	ret = dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(32));
	if (ret) {
		dev_err(&pdev->dev, "unable to set DMA mask\n");
		goto bad1;
	}

	priv->uioinfo = uioinfo;
	spin_lock_init(&priv->lock);
	priv->flags = 0; /* interrupt is enabled to begin with */
	priv->pdev = pdev;
	mutex_init(&priv->alloc_lock);

	if (!uioinfo->irq) {
		/* Multiple IRQs are not supported */
		ret = platform_get_irq(pdev, 0);
		if (ret == -ENXIO && pdev->dev.of_node)
			ret = UIO_IRQ_NONE;
		else if (ret < 0)
			goto bad1;
		uioinfo->irq = ret;
	}

	if (uioinfo->irq) {
		struct irq_data *irq_data = irq_get_irq_data(uioinfo->irq);

		/*
		 * If a level interrupt, don't do lazy disable. Otherwise the
		 * irq will fire again immediately, since clearing the actual
		 * cause at device level is done in user space.
		 * irqd_is_level_type() isn't used because it isn't valid
		 * until the irq is configured.
		 */
		if (irq_data &&
		    irqd_get_trigger_type(irq_data) & IRQ_TYPE_LEVEL_MASK) {
			dev_dbg(&pdev->dev, "disable lazy unmask\n");
			irq_set_status_flags(uioinfo->irq, IRQ_DISABLE_UNLAZY);
		}
	}

	uiomem = &uioinfo->mem[0];

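	/* Expose the device's fixed physical memory resources through the
	 * leading mem[] entries.
	 */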
	for (i = 0; i < pdev->num_resources; ++i) {
		struct resource *r = &pdev->resource[i];

		if (r->flags != IORESOURCE_MEM)
			continue;

		if (uiomem >= &uioinfo->mem[MAX_UIO_MAPS]) {
			dev_warn(&pdev->dev, "device has more than "
					__stringify(MAX_UIO_MAPS)
					" I/O memory resources.\n");
			break;
		}

		uiomem->memtype = UIO_MEM_PHYS;
		uiomem->addr = r->start;
		uiomem->size = resource_size(r);
		++uiomem;
	}

	priv->dmem_region_start = uiomem - &uioinfo->mem[0];
	priv->num_dmem_regions = pdata->num_dynamic_regions;

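	/* Append the dynamic regions after the fixed ones. They keep the
	 * DMEM_MAP_ERROR marker until the first open() allocates them.
	 */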
	for (i = 0; i < pdata->num_dynamic_regions; ++i) {
		if (uiomem >= &uioinfo->mem[MAX_UIO_MAPS]) {
			dev_warn(&pdev->dev, "device has more than "
					__stringify(MAX_UIO_MAPS)
					" dynamic and fixed memory regions.\n");
			break;
		}
		uiomem->memtype = UIO_MEM_PHYS;
		uiomem->addr = DMEM_MAP_ERROR;
		uiomem->size = pdata->dynamic_region_sizes[i];
		++uiomem;
	}

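	/* Mark any remaining mem[] entries as unused */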
	while (uiomem < &uioinfo->mem[MAX_UIO_MAPS]) {
		uiomem->size = 0;
		++uiomem;
	}

	/* This driver requires no hardware specific kernel code to handle
	 * interrupts. Instead, the interrupt handler simply disables the
	 * interrupt in the interrupt controller. User space is responsible
	 * for performing hardware specific acknowledge and re-enabling of
	 * the interrupt in the interrupt controller.
	 *
	 * Interrupt sharing is not supported.
	 */

	uioinfo->handler = uio_dmem_genirq_handler;
	uioinfo->irqcontrol = uio_dmem_genirq_irqcontrol;
	uioinfo->open = uio_dmem_genirq_open;
	uioinfo->release = uio_dmem_genirq_release;
	uioinfo->priv = priv;

	/* Enable Runtime PM for this device:
	 * The device starts in suspended state to allow the hardware to be
	 * turned off by default. The Runtime PM bus code should power on the
	 * hardware and enable clocks at open().
	 */
	pm_runtime_enable(&pdev->dev);

	ret = uio_register_device(&pdev->dev, priv->uioinfo);
	if (ret) {
		dev_err(&pdev->dev, "unable to register uio device\n");
		pm_runtime_disable(&pdev->dev);
		goto bad1;
	}

	platform_set_drvdata(pdev, priv);
	return 0;
bad1:
	kfree(priv);
bad0:
	/* kfree uioinfo for OF */
	if (pdev->dev.of_node)
		kfree(uioinfo);
bad2:
	return ret;
}

static int uio_dmem_genirq_remove(struct platform_device *pdev)
{
	struct uio_dmem_genirq_platdata *priv = platform_get_drvdata(pdev);

	uio_unregister_device(priv->uioinfo);
	pm_runtime_disable(&pdev->dev);

	priv->uioinfo->handler = NULL;
	priv->uioinfo->irqcontrol = NULL;

	/* kfree uioinfo for OF */
	if (pdev->dev.of_node)
		kfree(priv->uioinfo);

	kfree(priv);
	return 0;
}

static int uio_dmem_genirq_runtime_nop(struct device *dev)
{
	/* Runtime PM callback shared between ->runtime_suspend()
	 * and ->runtime_resume(). Simply returns success.
	 *
	 * In this driver pm_runtime_get_sync() and pm_runtime_put_sync()
	 * are used at open() and release() time. This allows the
	 * Runtime PM code to turn off power to the device while the
	 * device is unused, i.e. before open() and after release().
	 *
	 * This Runtime PM callback does not need to save or restore
	 * any registers since user space is responsible for hardware
	 * register reinitialization after open().
	 */
	return 0;
}

static const struct dev_pm_ops uio_dmem_genirq_dev_pm_ops = {
	.runtime_suspend = uio_dmem_genirq_runtime_nop,
	.runtime_resume = uio_dmem_genirq_runtime_nop,
};

#ifdef CONFIG_OF
static const struct of_device_id uio_of_genirq_match[] = {
	{ /* empty for now */ },
};
MODULE_DEVICE_TABLE(of, uio_of_genirq_match);
#endif

static struct platform_driver uio_dmem_genirq = {
	.probe = uio_dmem_genirq_probe,
	.remove = uio_dmem_genirq_remove,
	.driver = {
		.name = DRIVER_NAME,
		.pm = &uio_dmem_genirq_dev_pm_ops,
		.of_match_table = of_match_ptr(uio_of_genirq_match),
	},
};

module_platform_driver(uio_dmem_genirq);

MODULE_AUTHOR("Damian Hobson-Garcia");
MODULE_DESCRIPTION("Userspace I/O platform driver with dynamic memory.");
MODULE_LICENSE("GPL v2");
MODULE_ALIAS("platform:" DRIVER_NAME);