// SPDX-License-Identifier: GPL-2.0
/* Device wakeirq helper functions */
#include <linux/device.h>
#include <linux/interrupt.h>
#include <linux/irq.h>
#include <linux/slab.h>
#include <linux/pm_runtime.h>
#include <linux/pm_wakeirq.h>

#include "power.h"

/**
 * dev_pm_attach_wake_irq - Attach device interrupt as a wake IRQ
 * @dev: Device entry
 * @irq: Device wake-up capable interrupt
 * @wirq: Wake irq specific data
 *
 * Internal function to attach either a device IO interrupt or a
 * dedicated wake-up interrupt as a wake IRQ.
 */
static int dev_pm_attach_wake_irq(struct device *dev, int irq,
				  struct wake_irq *wirq)
{
	unsigned long flags;

	if (!dev || !wirq)
		return -EINVAL;

	spin_lock_irqsave(&dev->power.lock, flags);
	if (dev_WARN_ONCE(dev, dev->power.wakeirq,
			  "wake irq already initialized\n")) {
		spin_unlock_irqrestore(&dev->power.lock, flags);
		return -EEXIST;
	}

	dev->power.wakeirq = wirq;
	device_wakeup_attach_irq(dev, wirq);

	spin_unlock_irqrestore(&dev->power.lock, flags);
	return 0;
}

/**
 * dev_pm_set_wake_irq - Attach device IO interrupt as wake IRQ
 * @dev: Device entry
 * @irq: Device IO interrupt
 *
 * Attach a device IO interrupt as a wake IRQ. The wake IRQ gets
 * automatically configured for wake-up from suspend based
 * on the device-specific sysfs wakeup entry. Typically called
 * during driver probe after calling device_init_wakeup().
 */
int dev_pm_set_wake_irq(struct device *dev, int irq)
{
	struct wake_irq *wirq;
	int err;

	if (irq < 0)
		return -EINVAL;

	wirq = kzalloc(sizeof(*wirq), GFP_KERNEL);
	if (!wirq)
		return -ENOMEM;

	wirq->dev = dev;
	wirq->irq = irq;

	err = dev_pm_attach_wake_irq(dev, irq, wirq);
	if (err)
		kfree(wirq);

	return err;
}
EXPORT_SYMBOL_GPL(dev_pm_set_wake_irq);
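
/*
 * Example (illustrative sketch only, not part of this file): a typical
 * driver probe path that attaches its IO interrupt as a wake IRQ. The
 * "foo" names below are hypothetical placeholders.
 *
 *	static int foo_probe(struct platform_device *pdev)
 *	{
 *		int irq, err;
 *
 *		irq = platform_get_irq(pdev, 0);
 *		if (irq < 0)
 *			return irq;
 *
 *		device_init_wakeup(&pdev->dev, true);
 *		err = dev_pm_set_wake_irq(&pdev->dev, irq);
 *		if (err)
 *			dev_warn(&pdev->dev, "failed to set wake irq: %d\n", err);
 *
 *		return 0;
 *	}
 */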

/**
 * dev_pm_clear_wake_irq - Detach a device IO interrupt wake IRQ
 * @dev: Device entry
 *
 * Detach a device wake IRQ and free resources.
 *
 * Note that it's OK for drivers to call this without calling
 * dev_pm_set_wake_irq() as not all driver instances may have
 * a wake IRQ configured. This avoids adding wake IRQ specific
 * checks into the drivers.
 */
void dev_pm_clear_wake_irq(struct device *dev)
{
	struct wake_irq *wirq = dev->power.wakeirq;
	unsigned long flags;

	if (!wirq)
		return;

	spin_lock_irqsave(&dev->power.lock, flags);
	device_wakeup_detach_irq(dev);
	dev->power.wakeirq = NULL;
	spin_unlock_irqrestore(&dev->power.lock, flags);

	if (wirq->status & WAKE_IRQ_DEDICATED_ALLOCATED) {
		free_irq(wirq->irq, wirq);
		wirq->status &= ~WAKE_IRQ_DEDICATED_MASK;
	}
	kfree(wirq->name);
	kfree(wirq);
}
EXPORT_SYMBOL_GPL(dev_pm_clear_wake_irq);
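
/*
 * Example (illustrative sketch only): the matching remove path can call
 * dev_pm_clear_wake_irq() unconditionally, whether or not probe managed
 * to set a wake IRQ. The "foo" names are hypothetical placeholders.
 *
 *	static int foo_remove(struct platform_device *pdev)
 *	{
 *		dev_pm_clear_wake_irq(&pdev->dev);
 *		device_init_wakeup(&pdev->dev, false);
 *
 *		return 0;
 *	}
 */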

/**
 * handle_threaded_wake_irq - Handler for dedicated wake-up interrupts
 * @irq: Device specific dedicated wake-up interrupt
 * @_wirq: Wake IRQ data
 *
 * Some devices have a separate wake-up interrupt in addition to the
 * device IO interrupt. The wake-up interrupt signals that a device
 * should be woken up from its idle state. This handler uses
 * device-specific pm_runtime functions to wake the device, and then
 * it's up to the device to do whatever it needs to. Note that as the
 * device may need to restore context and start up regulators, we
 * use a threaded IRQ.
 *
 * Also note that we are not resending the lost device interrupts.
 * We assume that the wake-up interrupt just needs to wake up the
 * device, and then the device's pm_runtime_resume() can deal with the
 * situation.
 */
static irqreturn_t handle_threaded_wake_irq(int irq, void *_wirq)
{
	struct wake_irq *wirq = _wirq;
	int res;

	/* Maybe abort suspend? */
	if (irqd_is_wakeup_set(irq_get_irq_data(irq))) {
		pm_wakeup_event(wirq->dev, 0);

		return IRQ_HANDLED;
	}

	/* We don't want RPM_ASYNC or RPM_NOWAIT here */
	res = pm_runtime_resume(wirq->dev);
	if (res < 0)
		dev_warn(wirq->dev,
			 "wake IRQ with no resume: %i\n", res);

	return IRQ_HANDLED;
}

/**
 * dev_pm_set_dedicated_wake_irq - Request a dedicated wake-up interrupt
 * @dev: Device entry
 * @irq: Device wake-up interrupt
 *
 * Unless your hardware has separate wake-up interrupts in addition
 * to the device IO interrupts, you don't need this.
 *
 * Sets up a threaded interrupt handler for a device that has
 * a dedicated wake-up interrupt in addition to the device IO
 * interrupt.
 *
 * The interrupt starts disabled, and needs to be managed for
 * the device by the bus code or the device driver using the
 * dev_pm_enable_wake_irq() and dev_pm_disable_wake_irq()
 * functions.
 */
int dev_pm_set_dedicated_wake_irq(struct device *dev, int irq)
{
	struct wake_irq *wirq;
	int err;

	if (irq < 0)
		return -EINVAL;

	wirq = kzalloc(sizeof(*wirq), GFP_KERNEL);
	if (!wirq)
		return -ENOMEM;

	wirq->name = kasprintf(GFP_KERNEL, "%s:wakeup", dev_name(dev));
	if (!wirq->name) {
		err = -ENOMEM;
		goto err_free;
	}

	wirq->dev = dev;
	wirq->irq = irq;
	irq_set_status_flags(irq, IRQ_NOAUTOEN);

	/* Prevent deferred spurious wakeirqs with disable_irq_nosync() */
	irq_set_status_flags(irq, IRQ_DISABLE_UNLAZY);

	/*
	 * Consumer device may need to power up and restore state
	 * so we use a threaded irq.
	 */
	err = request_threaded_irq(irq, NULL, handle_threaded_wake_irq,
				   IRQF_ONESHOT, wirq->name, wirq);
	if (err)
		goto err_free_name;

	err = dev_pm_attach_wake_irq(dev, irq, wirq);
	if (err)
		goto err_free_irq;

	wirq->status = WAKE_IRQ_DEDICATED_ALLOCATED;

	return err;

err_free_irq:
	free_irq(irq, wirq);
err_free_name:
	kfree(wirq->name);
err_free:
	kfree(wirq);

	return err;
}
EXPORT_SYMBOL_GPL(dev_pm_set_dedicated_wake_irq);
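
/*
 * Example (illustrative sketch only): a driver whose hardware has a
 * separate wake-up line, typically described as a second interrupt
 * named "wakeup" in the device tree. The names below are hypothetical
 * placeholders.
 *
 *	static int foo_probe(struct platform_device *pdev)
 *	{
 *		int wakeirq, err;
 *
 *		wakeirq = platform_get_irq_byname_optional(pdev, "wakeup");
 *		if (wakeirq > 0) {
 *			device_init_wakeup(&pdev->dev, true);
 *			err = dev_pm_set_dedicated_wake_irq(&pdev->dev, wakeirq);
 *			if (err)
 *				return err;
 *		}
 *
 *		return 0;
 *	}
 */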

/**
 * dev_pm_enable_wake_irq - Enable device wake-up interrupt
 * @dev: Device
 *
 * Optionally called from the bus code or the device driver for
 * runtime_resume() to override the PM runtime core managed wake-up
 * interrupt handling to enable the wake-up interrupt.
 *
 * Note that for runtime_suspend() the wake-up interrupt should be
 * unconditionally enabled, unlike for suspend() where enabling it
 * is conditional.
 */
void dev_pm_enable_wake_irq(struct device *dev)
{
	struct wake_irq *wirq = dev->power.wakeirq;

	if (wirq && (wirq->status & WAKE_IRQ_DEDICATED_ALLOCATED))
		enable_irq(wirq->irq);
}
EXPORT_SYMBOL_GPL(dev_pm_enable_wake_irq);

/**
 * dev_pm_disable_wake_irq - Disable device wake-up interrupt
 * @dev: Device
 *
 * Optionally called from the bus code or the device driver for
 * runtime_suspend() to override the PM runtime core managed wake-up
 * interrupt handling to disable the wake-up interrupt.
 */
void dev_pm_disable_wake_irq(struct device *dev)
{
	struct wake_irq *wirq = dev->power.wakeirq;

	if (wirq && (wirq->status & WAKE_IRQ_DEDICATED_ALLOCATED))
		disable_irq_nosync(wirq->irq);
}
EXPORT_SYMBOL_GPL(dev_pm_disable_wake_irq);

/**
 * dev_pm_enable_wake_irq_check - Checks and enables wake-up interrupt
 * @dev: Device
 * @can_change_status: Can change wake-up interrupt status
 *
 * Enables wakeirq conditionally. We need to enable the wake-up interrupt
 * lazily on the first rpm_suspend(). This is needed as the consumer device
 * starts in RPM_SUSPENDED state, and the first pm_runtime_get() would
 * otherwise try to disable an already disabled wakeirq. The wake-up
 * interrupt starts disabled with IRQ_NOAUTOEN set.
 *
 * Should only be called from the rpm_suspend() and rpm_resume() paths.
 * Caller must hold &dev->power.lock to change wirq->status.
 */
void dev_pm_enable_wake_irq_check(struct device *dev,
				  bool can_change_status)
{
	struct wake_irq *wirq = dev->power.wakeirq;

	if (!wirq || !(wirq->status & WAKE_IRQ_DEDICATED_MASK))
		return;

	if (likely(wirq->status & WAKE_IRQ_DEDICATED_MANAGED)) {
		goto enable;
	} else if (can_change_status) {
		wirq->status |= WAKE_IRQ_DEDICATED_MANAGED;
		goto enable;
	}

	return;

enable:
	enable_irq(wirq->irq);
}

/**
 * dev_pm_disable_wake_irq_check - Checks and disables wake-up interrupt
 * @dev: Device
 *
 * Disables wake-up interrupt conditionally based on status.
 * Should only be called from the rpm_suspend() and rpm_resume() paths.
 */
void dev_pm_disable_wake_irq_check(struct device *dev)
{
	struct wake_irq *wirq = dev->power.wakeirq;

	if (!wirq || !(wirq->status & WAKE_IRQ_DEDICATED_MASK))
		return;

	if (wirq->status & WAKE_IRQ_DEDICATED_MANAGED)
		disable_irq_nosync(wirq->irq);
}

/**
 * dev_pm_arm_wake_irq - Arm device wake-up
 * @wirq: Device wake-up interrupt
 *
 * Sets up the wake-up event conditionally based on
 * device_may_wakeup().
 */
void dev_pm_arm_wake_irq(struct wake_irq *wirq)
{
	if (!wirq)
		return;

	if (device_may_wakeup(wirq->dev)) {
		if (wirq->status & WAKE_IRQ_DEDICATED_ALLOCATED &&
		    !pm_runtime_status_suspended(wirq->dev))
			enable_irq(wirq->irq);

		enable_irq_wake(wirq->irq);
	}
}

/**
 * dev_pm_disarm_wake_irq - Disarm device wake-up
 * @wirq: Device wake-up interrupt
 *
 * Clears up the wake-up event conditionally based on
 * device_may_wakeup().
 */
void dev_pm_disarm_wake_irq(struct wake_irq *wirq)
{
	if (!wirq)
		return;

	if (device_may_wakeup(wirq->dev)) {
		disable_irq_wake(wirq->irq);

		if (wirq->status & WAKE_IRQ_DEDICATED_ALLOCATED &&
		    !pm_runtime_status_suspended(wirq->dev))
			disable_irq_nosync(wirq->irq);
	}
}