Orange Pi 5 kernel

Deprecated Linux kernel 5.10.110 for OrangePi 5/5B/5+ boards

// SPDX-License-Identifier: GPL-2.0-only
/*
 * drivers/uio/uio_pdrv_genirq.c
 *
 * Userspace I/O platform driver with generic IRQ handling code.
 *
 * Copyright (C) 2008 Magnus Damm
 *
 * Based on uio_pdrv.c by Uwe Kleine-Koenig,
 * Copyright (C) 2008 by Digi International Inc.
 * All rights reserved.
 */

#include <linux/platform_device.h>
#include <linux/uio_driver.h>
#include <linux/spinlock.h>
#include <linux/bitops.h>
#include <linux/module.h>
#include <linux/interrupt.h>
#include <linux/stringify.h>
#include <linux/pm_runtime.h>
#include <linux/slab.h>
#include <linux/irq.h>

#include <linux/of.h>
#include <linux/of_platform.h>
#include <linux/of_address.h>

#define DRIVER_NAME "uio_pdrv_genirq"

struct uio_pdrv_genirq_platdata {
        struct uio_info *uioinfo;
        spinlock_t lock;
        unsigned long flags;
        struct platform_device *pdev;
};

/* Bits in uio_pdrv_genirq_platdata.flags */
enum {
        UIO_IRQ_DISABLED = 0,
};

static int uio_pdrv_genirq_open(struct uio_info *info, struct inode *inode)
{
        struct uio_pdrv_genirq_platdata *priv = info->priv;

        /* Wait until the Runtime PM code has woken up the device */
        pm_runtime_get_sync(&priv->pdev->dev);
        return 0;
}

static int uio_pdrv_genirq_release(struct uio_info *info, struct inode *inode)
{
        struct uio_pdrv_genirq_platdata *priv = info->priv;

        /* Tell the Runtime PM code that the device has become idle */
        pm_runtime_put_sync(&priv->pdev->dev);
        return 0;
}

static irqreturn_t uio_pdrv_genirq_handler(int irq, struct uio_info *dev_info)
{
        struct uio_pdrv_genirq_platdata *priv = dev_info->priv;

        /* Just disable the interrupt in the interrupt controller, and
         * remember the state so we can allow user space to enable it later.
         */

        spin_lock(&priv->lock);
        if (!__test_and_set_bit(UIO_IRQ_DISABLED, &priv->flags))
                disable_irq_nosync(irq);
        spin_unlock(&priv->lock);

        return IRQ_HANDLED;
}

static int uio_pdrv_genirq_irqcontrol(struct uio_info *dev_info, s32 irq_on)
{
        struct uio_pdrv_genirq_platdata *priv = dev_info->priv;
        unsigned long flags;

        /* Allow user space to enable and disable the interrupt
         * in the interrupt controller, but keep track of the
         * state to prevent per-irq depth damage.
         *
         * Serialize this operation to support multiple tasks and concurrency
         * with irq handler on SMP systems.
         */

        spin_lock_irqsave(&priv->lock, flags);
        if (irq_on) {
                if (__test_and_clear_bit(UIO_IRQ_DISABLED, &priv->flags))
                        enable_irq(dev_info->irq);
        } else {
                if (!__test_and_set_bit(UIO_IRQ_DISABLED, &priv->flags))
                        disable_irq_nosync(dev_info->irq);
        }
        spin_unlock_irqrestore(&priv->lock, flags);

        return 0;
}

static void uio_pdrv_genirq_cleanup(void *data)
{
        struct device *dev = data;

        pm_runtime_disable(dev);
}

static int uio_pdrv_genirq_probe(struct platform_device *pdev)
{
        struct uio_info *uioinfo = dev_get_platdata(&pdev->dev);
        struct device_node *node = pdev->dev.of_node;
        struct uio_pdrv_genirq_platdata *priv;
        struct uio_mem *uiomem;
        int ret = -EINVAL;
        int i;

        if (node) {
                const char *name;

                /* alloc uioinfo for one device */
                uioinfo = devm_kzalloc(&pdev->dev, sizeof(*uioinfo),
                                       GFP_KERNEL);
                if (!uioinfo) {
                        dev_err(&pdev->dev, "unable to kmalloc\n");
                        return -ENOMEM;
                }

                if (!of_property_read_string(node, "linux,uio-name", &name))
                        uioinfo->name = devm_kstrdup(&pdev->dev, name, GFP_KERNEL);
                else
                        uioinfo->name = devm_kasprintf(&pdev->dev, GFP_KERNEL,
                                                       "%pOFn", node);

                uioinfo->version = "devicetree";
                /* Multiple IRQs are not supported */
        }

        if (!uioinfo || !uioinfo->name || !uioinfo->version) {
                dev_err(&pdev->dev, "missing platform_data\n");
                return ret;
        }

        if (uioinfo->handler || uioinfo->irqcontrol ||
            uioinfo->irq_flags & IRQF_SHARED) {
                dev_err(&pdev->dev, "interrupt configuration error\n");
                return ret;
        }

        priv = devm_kzalloc(&pdev->dev, sizeof(*priv), GFP_KERNEL);
        if (!priv) {
                dev_err(&pdev->dev, "unable to kmalloc\n");
                return -ENOMEM;
        }

        priv->uioinfo = uioinfo;
        spin_lock_init(&priv->lock);
        priv->flags = 0; /* interrupt is enabled to begin with */
        priv->pdev = pdev;

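        /*
         * If platform data did not provide an IRQ, look one up in the
         * platform resources. -ENXIO means the device simply has no
         * interrupt, which is allowed (UIO_IRQ_NONE); any other error,
         * including probe deferral, aborts the probe.
         */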
        if (!uioinfo->irq) {
                ret = platform_get_irq_optional(pdev, 0);
                uioinfo->irq = ret;
                if (ret == -ENXIO)
                        uioinfo->irq = UIO_IRQ_NONE;
                else if (ret == -EPROBE_DEFER)
                        return ret;
                else if (ret < 0) {
                        dev_err(&pdev->dev, "failed to get IRQ\n");
                        return ret;
                }
        }

        if (uioinfo->irq) {
                struct irq_data *irq_data = irq_get_irq_data(uioinfo->irq);

                /*
                 * If this is a level interrupt, don't do lazy disable.
                 * Otherwise the irq will fire again, since clearing of the
                 * actual cause, at device level, is done in userspace.
                 * irqd_is_level_type() isn't used since it isn't valid until
                 * the irq is configured.
                 */
                if (irq_data &&
                    irqd_get_trigger_type(irq_data) & IRQ_TYPE_LEVEL_MASK) {
                        dev_dbg(&pdev->dev, "disable lazy unmask\n");
                        irq_set_status_flags(uioinfo->irq, IRQ_DISABLE_UNLAZY);
                }
        }

        uiomem = &uioinfo->mem[0];

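        /* Export each I/O memory resource as a page-aligned UIO mapping. */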
        for (i = 0; i < pdev->num_resources; ++i) {
                struct resource *r = &pdev->resource[i];

                if (r->flags != IORESOURCE_MEM)
                        continue;

                if (uiomem >= &uioinfo->mem[MAX_UIO_MAPS]) {
                        dev_warn(&pdev->dev, "device has more than "
                                        __stringify(MAX_UIO_MAPS)
                                        " I/O memory resources.\n");
                        break;
                }

                uiomem->memtype = UIO_MEM_PHYS;
                uiomem->addr = r->start & PAGE_MASK;
                uiomem->offs = r->start & ~PAGE_MASK;
                uiomem->size = (uiomem->offs + resource_size(r)
                                + PAGE_SIZE - 1) & PAGE_MASK;
                uiomem->name = r->name;
                ++uiomem;
        }

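        /* Mark the remaining, unused mapping slots as empty. */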
        while (uiomem < &uioinfo->mem[MAX_UIO_MAPS]) {
                uiomem->size = 0;
                ++uiomem;
        }

        /* This driver requires no hardware specific kernel code to handle
         * interrupts. Instead, the interrupt handler simply disables the
         * interrupt in the interrupt controller. User space is responsible
         * for performing hardware specific acknowledge and re-enabling of
         * the interrupt in the interrupt controller.
         *
         * Interrupt sharing is not supported.
         */

        uioinfo->handler = uio_pdrv_genirq_handler;
        uioinfo->irqcontrol = uio_pdrv_genirq_irqcontrol;
        uioinfo->open = uio_pdrv_genirq_open;
        uioinfo->release = uio_pdrv_genirq_release;
        uioinfo->priv = priv;

        /* Enable Runtime PM for this device:
         * The device starts in suspended state to allow the hardware to be
         * turned off by default. The Runtime PM bus code should power on the
         * hardware and enable clocks at open().
         */
        pm_runtime_enable(&pdev->dev);

        ret = devm_add_action_or_reset(&pdev->dev, uio_pdrv_genirq_cleanup,
                                       &pdev->dev);
        if (ret)
                return ret;

        ret = devm_uio_register_device(&pdev->dev, priv->uioinfo);
        if (ret)
                dev_err(&pdev->dev, "unable to register uio device\n");

        return ret;
}

static int uio_pdrv_genirq_runtime_nop(struct device *dev)
{
        /* Runtime PM callback shared between ->runtime_suspend()
         * and ->runtime_resume(). Simply returns success.
         *
         * In this driver pm_runtime_get_sync() and pm_runtime_put_sync()
         * are used at open() and release() time. This allows the
         * Runtime PM code to turn off power to the device while the
         * device is unused, i.e. before open() and after release().
         *
         * This Runtime PM callback does not need to save or restore
         * any registers since user space is responsible for hardware
         * register reinitialization after open().
         */
        return 0;
}

static const struct dev_pm_ops uio_pdrv_genirq_dev_pm_ops = {
        .runtime_suspend = uio_pdrv_genirq_runtime_nop,
        .runtime_resume = uio_pdrv_genirq_runtime_nop,
};

#ifdef CONFIG_OF
static struct of_device_id uio_of_genirq_match[] = {
        { /* This is filled with module_param */ },
        { /* Sentinel */ },
};
MODULE_DEVICE_TABLE(of, uio_of_genirq_match);
module_param_string(of_id, uio_of_genirq_match[0].compatible, 128, 0);
MODULE_PARM_DESC(of_id, "OpenFirmware id of the device to be handled by uio");
#endif

static struct platform_driver uio_pdrv_genirq = {
        .probe = uio_pdrv_genirq_probe,
        .driver = {
                .name = DRIVER_NAME,
                .pm = &uio_pdrv_genirq_dev_pm_ops,
                .of_match_table = of_match_ptr(uio_of_genirq_match),
        },
};

module_platform_driver(uio_pdrv_genirq);

MODULE_AUTHOR("Magnus Damm");
MODULE_DESCRIPTION("Userspace I/O platform driver with generic IRQ handling");
MODULE_LICENSE("GPL v2");
MODULE_ALIAS("platform:" DRIVER_NAME);
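
The comment in uio_pdrv_genirq_probe() above describes the division of labour: the in-kernel handler only masks the interrupt, and user space is expected to acknowledge the device and re-enable the IRQ through the UIO file descriptor. As an illustrative sketch only (not part of this source file), the user-space side typically looks like the program below; the device node name /dev/uio0 and the way the device gets bound to this driver (for example via modprobe uio_pdrv_genirq of_id="vendor,device" or platform data) are assumptions that depend on the actual board setup.

/*
 * Sketch of a user-space consumer of uio_pdrv_genirq.
 * Assumption: the target device is bound to this driver and appears
 * as /dev/uio0.
 */
#include <fcntl.h>
#include <stdint.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
        int fd = open("/dev/uio0", O_RDWR);
        if (fd < 0) {
                perror("open /dev/uio0");
                return 1;
        }

        for (;;) {
                uint32_t enable = 1;
                uint32_t count;

                /* A 4-byte non-zero write reaches uio_pdrv_genirq_irqcontrol()
                 * and re-enables the interrupt before we wait for it. */
                if (write(fd, &enable, sizeof(enable)) != sizeof(enable)) {
                        perror("write");
                        break;
                }

                /* read() blocks until the next interrupt and returns the
                 * total event count maintained by the UIO core. */
                if (read(fd, &count, sizeof(count)) != sizeof(count)) {
                        perror("read");
                        break;
                }

                printf("interrupt #%u\n", count);

                /* Device-specific acknowledgement would go here, typically
                 * through registers mapped with mmap() on the same fd. */
        }

        close(fd);
        return 0;
}

Memory regions exported through uioinfo->mem[] are mapped from user space with mmap() on the same file descriptor, one page-aligned mapping per map index (selected by the mmap offset, N * page size for map N); see the UIO how-to under Documentation/driver-api/ in this kernel tree for the full interface.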