// SPDX-License-Identifier: GPL-2.0
/*
 * Devices PM QoS constraints management
 *
 * Copyright (C) 2011 Texas Instruments, Inc.
 *
 * This module exposes the interface to kernel space for specifying
 * per-device PM QoS dependencies. It provides infrastructure for registration
 * of:
 *
 * Dependents on a QoS value: register requests
 * Watchers of QoS value: get notified when target QoS value changes
 *
 * This QoS design is best effort based. Dependents register their QoS needs.
 * Watchers register to keep track of the current QoS needs of the system.
 * Watchers can register a per-device notification callback using the
 * dev_pm_qos_*_notifier API. The notification chain data is stored in the
 * per-device constraint data struct.
 *
 * Note about the per-device constraint data struct allocation:
 * . The per-device constraints data struct ptr is stored into the device
 *   dev_pm_info.
 * . To minimize the data usage by the per-device constraints, the data struct
 *   is only allocated at the first call to dev_pm_qos_add_request.
 * . The data is later freed when the device is removed from the system.
 * . A global mutex protects the constraints users from the data being
 *   allocated and freed.
 */

#include <linux/pm_qos.h>
#include <linux/spinlock.h>
#include <linux/slab.h>
#include <linux/device.h>
#include <linux/mutex.h>
#include <linux/export.h>
#include <linux/pm_runtime.h>
#include <linux/err.h>
#include <trace/events/power.h>

#include "power.h"

static DEFINE_MUTEX(dev_pm_qos_mtx);
static DEFINE_MUTEX(dev_pm_qos_sysfs_mtx);

/**
 * __dev_pm_qos_flags - Check PM QoS flags for a given device.
 * @dev: Device to check the PM QoS flags for.
 * @mask: Flags to check against.
 *
 * This routine must be called with dev->power.lock held.
 */
enum pm_qos_flags_status __dev_pm_qos_flags(struct device *dev, s32 mask)
{
	struct dev_pm_qos *qos = dev->power.qos;
	struct pm_qos_flags *pqf;
	s32 val;

	lockdep_assert_held(&dev->power.lock);

	if (IS_ERR_OR_NULL(qos))
		return PM_QOS_FLAGS_UNDEFINED;

	pqf = &qos->flags;
	if (list_empty(&pqf->list))
		return PM_QOS_FLAGS_UNDEFINED;

	val = pqf->effective_flags & mask;
	if (val)
		return (val == mask) ? PM_QOS_FLAGS_ALL : PM_QOS_FLAGS_SOME;

	return PM_QOS_FLAGS_NONE;
}

/**
 * dev_pm_qos_flags - Check PM QoS flags for a given device (locked).
 * @dev: Device to check the PM QoS flags for.
 * @mask: Flags to check against.
 */
enum pm_qos_flags_status dev_pm_qos_flags(struct device *dev, s32 mask)
{
	unsigned long irqflags;
	enum pm_qos_flags_status ret;

	spin_lock_irqsave(&dev->power.lock, irqflags);
	ret = __dev_pm_qos_flags(dev, mask);
	spin_unlock_irqrestore(&dev->power.lock, irqflags);

	return ret;
}
EXPORT_SYMBOL_GPL(dev_pm_qos_flags);
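
/*
 * Illustrative sketch (not used in this file): platform or firmware glue that
 * must decide whether it may remove power from a device can consult the
 * aggregated flags, e.g.:
 *
 *	if (dev_pm_qos_flags(dev, PM_QOS_FLAG_NO_POWER_OFF) > PM_QOS_FLAGS_NONE)
 *		keep_power_on = true;
 *
 * "keep_power_on" is a hypothetical local variable used only for the example.
 */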

/**
 * __dev_pm_qos_resume_latency - Get resume latency constraint for a given device.
 * @dev: Device to get the PM QoS constraint value for.
 *
 * This routine must be called with dev->power.lock held.
 */
s32 __dev_pm_qos_resume_latency(struct device *dev)
{
	lockdep_assert_held(&dev->power.lock);

	return dev_pm_qos_raw_resume_latency(dev);
}

/**
 * dev_pm_qos_read_value - Get PM QoS constraint for a given device (locked).
 * @dev: Device to get the PM QoS constraint value for.
 * @type: QoS request type.
 */
s32 dev_pm_qos_read_value(struct device *dev, enum dev_pm_qos_req_type type)
{
	struct dev_pm_qos *qos = dev->power.qos;
	unsigned long flags;
	s32 ret;

	spin_lock_irqsave(&dev->power.lock, flags);

	switch (type) {
	case DEV_PM_QOS_RESUME_LATENCY:
		ret = IS_ERR_OR_NULL(qos) ? PM_QOS_RESUME_LATENCY_NO_CONSTRAINT
					  : pm_qos_read_value(&qos->resume_latency);
		break;
	case DEV_PM_QOS_MIN_FREQUENCY:
		ret = IS_ERR_OR_NULL(qos) ? PM_QOS_MIN_FREQUENCY_DEFAULT_VALUE
					  : freq_qos_read_value(&qos->freq, FREQ_QOS_MIN);
		break;
	case DEV_PM_QOS_MAX_FREQUENCY:
		ret = IS_ERR_OR_NULL(qos) ? PM_QOS_MAX_FREQUENCY_DEFAULT_VALUE
					  : freq_qos_read_value(&qos->freq, FREQ_QOS_MAX);
		break;
	default:
		WARN_ON(1);
		ret = 0;
	}

	spin_unlock_irqrestore(&dev->power.lock, flags);

	return ret;
}
EXPORT_SYMBOL_GPL(dev_pm_qos_read_value);
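
/*
 * Illustrative sketch (not used in this file): a PM domain or bus driver
 * picking a low-power state could compare the aggregated constraint with the
 * exit latency of a candidate state, along the lines of:
 *
 *	s32 limit = dev_pm_qos_read_value(dev, DEV_PM_QOS_RESUME_LATENCY);
 *
 *	if (limit != PM_QOS_RESUME_LATENCY_NO_CONSTRAINT &&
 *	    state_exit_latency_us > limit)
 *		reject_this_state();
 *
 * "state_exit_latency_us" and "reject_this_state()" are hypothetical names
 * standing in for the caller's own bookkeeping.
 */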

/**
 * apply_constraint - Add/modify/remove device PM QoS request.
 * @req: Constraint request to apply
 * @action: Action to perform (add/update/remove).
 * @value: Value to assign to the QoS request.
 *
 * Internal function to update the constraints list using the PM QoS core
 * code and if needed call the per-device callbacks.
 */
static int apply_constraint(struct dev_pm_qos_request *req,
			    enum pm_qos_req_action action, s32 value)
{
	struct dev_pm_qos *qos = req->dev->power.qos;
	int ret;

	switch (req->type) {
	case DEV_PM_QOS_RESUME_LATENCY:
		if (WARN_ON(action != PM_QOS_REMOVE_REQ && value < 0))
			value = 0;

		ret = pm_qos_update_target(&qos->resume_latency,
					   &req->data.pnode, action, value);
		break;
	case DEV_PM_QOS_LATENCY_TOLERANCE:
		ret = pm_qos_update_target(&qos->latency_tolerance,
					   &req->data.pnode, action, value);
		if (ret) {
			value = pm_qos_read_value(&qos->latency_tolerance);
			req->dev->power.set_latency_tolerance(req->dev, value);
		}
		break;
	case DEV_PM_QOS_MIN_FREQUENCY:
	case DEV_PM_QOS_MAX_FREQUENCY:
		ret = freq_qos_apply(&req->data.freq, action, value);
		break;
	case DEV_PM_QOS_FLAGS:
		ret = pm_qos_update_flags(&qos->flags, &req->data.flr,
					  action, value);
		break;
	default:
		ret = -EINVAL;
	}

	return ret;
}

/*
 * dev_pm_qos_constraints_allocate - Allocate the device PM QoS constraints data.
 * @dev: device to allocate data for
 *
 * Called at the first call to add_request, for constraint data allocation.
 * Must be called with the dev_pm_qos_mtx mutex held.
 */
static int dev_pm_qos_constraints_allocate(struct device *dev)
{
	struct dev_pm_qos *qos;
	struct pm_qos_constraints *c;
	struct blocking_notifier_head *n;

	qos = kzalloc(sizeof(*qos), GFP_KERNEL);
	if (!qos)
		return -ENOMEM;

	n = kzalloc(3 * sizeof(*n), GFP_KERNEL);
	if (!n) {
		kfree(qos);
		return -ENOMEM;
	}

	c = &qos->resume_latency;
	plist_head_init(&c->list);
	c->target_value = PM_QOS_RESUME_LATENCY_DEFAULT_VALUE;
	c->default_value = PM_QOS_RESUME_LATENCY_DEFAULT_VALUE;
	c->no_constraint_value = PM_QOS_RESUME_LATENCY_NO_CONSTRAINT;
	c->type = PM_QOS_MIN;
	c->notifiers = n;
	BLOCKING_INIT_NOTIFIER_HEAD(n);

	c = &qos->latency_tolerance;
	plist_head_init(&c->list);
	c->target_value = PM_QOS_LATENCY_TOLERANCE_DEFAULT_VALUE;
	c->default_value = PM_QOS_LATENCY_TOLERANCE_DEFAULT_VALUE;
	c->no_constraint_value = PM_QOS_LATENCY_TOLERANCE_NO_CONSTRAINT;
	c->type = PM_QOS_MIN;

	freq_constraints_init(&qos->freq);

	INIT_LIST_HEAD(&qos->flags.list);

	spin_lock_irq(&dev->power.lock);
	dev->power.qos = qos;
	spin_unlock_irq(&dev->power.lock);

	return 0;
}

static void __dev_pm_qos_hide_latency_limit(struct device *dev);
static void __dev_pm_qos_hide_flags(struct device *dev);

/**
 * dev_pm_qos_constraints_destroy - Drop all constraints and free the device PM QoS data.
 * @dev: target device
 *
 * Called from the device PM subsystem on device removal under device_pm_lock().
 */
void dev_pm_qos_constraints_destroy(struct device *dev)
{
	struct dev_pm_qos *qos;
	struct dev_pm_qos_request *req, *tmp;
	struct pm_qos_constraints *c;
	struct pm_qos_flags *f;

	mutex_lock(&dev_pm_qos_sysfs_mtx);

	/*
	 * If the device's PM QoS resume latency limit or PM QoS flags have been
	 * exposed to user space, they have to be hidden at this point.
	 */
	pm_qos_sysfs_remove_resume_latency(dev);
	pm_qos_sysfs_remove_flags(dev);

	mutex_lock(&dev_pm_qos_mtx);

	__dev_pm_qos_hide_latency_limit(dev);
	__dev_pm_qos_hide_flags(dev);

	qos = dev->power.qos;
	if (!qos)
		goto out;

	/* Flush the constraints lists for the device. */
	c = &qos->resume_latency;
	plist_for_each_entry_safe(req, tmp, &c->list, data.pnode) {
		/*
		 * Update constraints list and call the notification
		 * callbacks if needed
		 */
		apply_constraint(req, PM_QOS_REMOVE_REQ, PM_QOS_DEFAULT_VALUE);
		memset(req, 0, sizeof(*req));
	}

	c = &qos->latency_tolerance;
	plist_for_each_entry_safe(req, tmp, &c->list, data.pnode) {
		apply_constraint(req, PM_QOS_REMOVE_REQ, PM_QOS_DEFAULT_VALUE);
		memset(req, 0, sizeof(*req));
	}

	c = &qos->freq.min_freq;
	plist_for_each_entry_safe(req, tmp, &c->list, data.freq.pnode) {
		apply_constraint(req, PM_QOS_REMOVE_REQ,
				 PM_QOS_MIN_FREQUENCY_DEFAULT_VALUE);
		memset(req, 0, sizeof(*req));
	}

	c = &qos->freq.max_freq;
	plist_for_each_entry_safe(req, tmp, &c->list, data.freq.pnode) {
		apply_constraint(req, PM_QOS_REMOVE_REQ,
				 PM_QOS_MAX_FREQUENCY_DEFAULT_VALUE);
		memset(req, 0, sizeof(*req));
	}

	f = &qos->flags;
	list_for_each_entry_safe(req, tmp, &f->list, data.flr.node) {
		apply_constraint(req, PM_QOS_REMOVE_REQ, PM_QOS_DEFAULT_VALUE);
		memset(req, 0, sizeof(*req));
	}

	spin_lock_irq(&dev->power.lock);
	dev->power.qos = ERR_PTR(-ENODEV);
	spin_unlock_irq(&dev->power.lock);

	kfree(qos->resume_latency.notifiers);
	kfree(qos);

 out:
	mutex_unlock(&dev_pm_qos_mtx);

	mutex_unlock(&dev_pm_qos_sysfs_mtx);
}

static bool dev_pm_qos_invalid_req_type(struct device *dev,
					enum dev_pm_qos_req_type type)
{
	return type == DEV_PM_QOS_LATENCY_TOLERANCE &&
	       !dev->power.set_latency_tolerance;
}

static int __dev_pm_qos_add_request(struct device *dev,
				    struct dev_pm_qos_request *req,
				    enum dev_pm_qos_req_type type, s32 value)
{
	int ret = 0;

	if (!dev || !req || dev_pm_qos_invalid_req_type(dev, type))
		return -EINVAL;

	if (WARN(dev_pm_qos_request_active(req),
		 "%s() called for already added request\n", __func__))
		return -EINVAL;

	if (IS_ERR(dev->power.qos))
		ret = -ENODEV;
	else if (!dev->power.qos)
		ret = dev_pm_qos_constraints_allocate(dev);

	trace_dev_pm_qos_add_request(dev_name(dev), type, value);
	if (ret)
		return ret;

	req->dev = dev;
	req->type = type;
	if (req->type == DEV_PM_QOS_MIN_FREQUENCY)
		ret = freq_qos_add_request(&dev->power.qos->freq,
					   &req->data.freq,
					   FREQ_QOS_MIN, value);
	else if (req->type == DEV_PM_QOS_MAX_FREQUENCY)
		ret = freq_qos_add_request(&dev->power.qos->freq,
					   &req->data.freq,
					   FREQ_QOS_MAX, value);
	else
		ret = apply_constraint(req, PM_QOS_ADD_REQ, value);

	return ret;
}

/**
 * dev_pm_qos_add_request - inserts new qos request into the list
 * @dev: target device for the constraint
 * @req: pointer to a preallocated handle
 * @type: type of the request
 * @value: defines the qos request
 *
 * This function inserts a new entry in the device constraints list of
 * requested qos performance characteristics. It recomputes the aggregate
 * QoS expectations of parameters and initializes the dev_pm_qos_request
 * handle. Caller needs to save this handle for later use in updates and
 * removal.
 *
 * Returns 1 if the aggregated constraint value has changed,
 * 0 if the aggregated constraint value has not changed,
 * -EINVAL in case of wrong parameters, -ENOMEM if there's not enough memory
 * to allocate for data structures, -ENODEV if the device has just been removed
 * from the system.
 *
 * Callers should ensure that the target device is not RPM_SUSPENDED before
 * using this function for requests of type DEV_PM_QOS_FLAGS.
 */
int dev_pm_qos_add_request(struct device *dev, struct dev_pm_qos_request *req,
			   enum dev_pm_qos_req_type type, s32 value)
{
	int ret;

	mutex_lock(&dev_pm_qos_mtx);
	ret = __dev_pm_qos_add_request(dev, req, type, value);
	mutex_unlock(&dev_pm_qos_mtx);
	return ret;
}
EXPORT_SYMBOL_GPL(dev_pm_qos_add_request);
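
/*
 * Illustrative sketch (hypothetical caller, not part of this file): a driver
 * that temporarily cannot tolerate more than 100 us of resume latency would
 * typically keep a request object around and drive it like this:
 *
 *	static struct dev_pm_qos_request my_req;
 *
 *	ret = dev_pm_qos_add_request(dev, &my_req,
 *				     DEV_PM_QOS_RESUME_LATENCY, 100);
 *	if (ret < 0)
 *		return ret;
 *
 *	// ... later, tighten or relax the constraint ...
 *	dev_pm_qos_update_request(&my_req, 50);
 *
 *	// ... and drop it when it is no longer needed ...
 *	dev_pm_qos_remove_request(&my_req);
 *
 * "my_req" and the latency numbers are made up for the example; the request
 * object must stay allocated for as long as it is registered.
 */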

/**
 * __dev_pm_qos_update_request - Modify an existing device PM QoS request.
 * @req: PM QoS request to modify.
 * @new_value: New value to request.
 */
static int __dev_pm_qos_update_request(struct dev_pm_qos_request *req,
				       s32 new_value)
{
	s32 curr_value;
	int ret = 0;

	if (!req) /* guard against callers passing in null */
		return -EINVAL;

	if (WARN(!dev_pm_qos_request_active(req),
		 "%s() called for unknown object\n", __func__))
		return -EINVAL;

	if (IS_ERR_OR_NULL(req->dev->power.qos))
		return -ENODEV;

	switch (req->type) {
	case DEV_PM_QOS_RESUME_LATENCY:
	case DEV_PM_QOS_LATENCY_TOLERANCE:
		curr_value = req->data.pnode.prio;
		break;
	case DEV_PM_QOS_MIN_FREQUENCY:
	case DEV_PM_QOS_MAX_FREQUENCY:
		curr_value = req->data.freq.pnode.prio;
		break;
	case DEV_PM_QOS_FLAGS:
		curr_value = req->data.flr.flags;
		break;
	default:
		return -EINVAL;
	}

	trace_dev_pm_qos_update_request(dev_name(req->dev), req->type,
					new_value);
	if (curr_value != new_value)
		ret = apply_constraint(req, PM_QOS_UPDATE_REQ, new_value);

	return ret;
}

/**
 * dev_pm_qos_update_request - modifies an existing qos request
 * @req: handle to list element holding a dev_pm_qos request to use
 * @new_value: defines the qos request
 *
 * Updates an existing dev PM qos request along with updating the
 * target value.
 *
 * Attempts are made to make this code callable on hot code paths.
 *
 * Returns 1 if the aggregated constraint value has changed,
 * 0 if the aggregated constraint value has not changed,
 * -EINVAL in case of wrong parameters, -ENODEV if the device has been
 * removed from the system
 *
 * Callers should ensure that the target device is not RPM_SUSPENDED before
 * using this function for requests of type DEV_PM_QOS_FLAGS.
 */
int dev_pm_qos_update_request(struct dev_pm_qos_request *req, s32 new_value)
{
	int ret;

	mutex_lock(&dev_pm_qos_mtx);
	ret = __dev_pm_qos_update_request(req, new_value);
	mutex_unlock(&dev_pm_qos_mtx);
	return ret;
}
EXPORT_SYMBOL_GPL(dev_pm_qos_update_request);

static int __dev_pm_qos_remove_request(struct dev_pm_qos_request *req)
{
	int ret;

	if (!req) /* guard against callers passing in null */
		return -EINVAL;

	if (WARN(!dev_pm_qos_request_active(req),
		 "%s() called for unknown object\n", __func__))
		return -EINVAL;

	if (IS_ERR_OR_NULL(req->dev->power.qos))
		return -ENODEV;

	trace_dev_pm_qos_remove_request(dev_name(req->dev), req->type,
					PM_QOS_DEFAULT_VALUE);
	ret = apply_constraint(req, PM_QOS_REMOVE_REQ, PM_QOS_DEFAULT_VALUE);
	memset(req, 0, sizeof(*req));
	return ret;
}

/**
 * dev_pm_qos_remove_request - removes an existing qos request
 * @req: handle to request list element
 *
 * Will remove pm qos request from the list of constraints and
 * recompute the current target value. Call this on slow code paths.
 *
 * Returns 1 if the aggregated constraint value has changed,
 * 0 if the aggregated constraint value has not changed,
 * -EINVAL in case of wrong parameters, -ENODEV if the device has been
 * removed from the system
 *
 * Callers should ensure that the target device is not RPM_SUSPENDED before
 * using this function for requests of type DEV_PM_QOS_FLAGS.
 */
int dev_pm_qos_remove_request(struct dev_pm_qos_request *req)
{
	int ret;

	mutex_lock(&dev_pm_qos_mtx);
	ret = __dev_pm_qos_remove_request(req);
	mutex_unlock(&dev_pm_qos_mtx);
	return ret;
}
EXPORT_SYMBOL_GPL(dev_pm_qos_remove_request);

/**
 * dev_pm_qos_add_notifier - sets notification entry for changes to target value
 * of per-device PM QoS constraints
 *
 * @dev: target device for the constraint
 * @notifier: notifier block managed by caller.
 * @type: request type.
 *
 * Will register the notifier into a notification chain that gets called
 * upon changes to the target value for the device.
 *
 * If the device's constraints object doesn't exist when this routine is called,
 * it will be created (or error code will be returned if that fails).
 */
int dev_pm_qos_add_notifier(struct device *dev, struct notifier_block *notifier,
			    enum dev_pm_qos_req_type type)
{
	int ret = 0;

	mutex_lock(&dev_pm_qos_mtx);

	if (IS_ERR(dev->power.qos))
		ret = -ENODEV;
	else if (!dev->power.qos)
		ret = dev_pm_qos_constraints_allocate(dev);

	if (ret)
		goto unlock;

	switch (type) {
	case DEV_PM_QOS_RESUME_LATENCY:
		ret = blocking_notifier_chain_register(dev->power.qos->resume_latency.notifiers,
						       notifier);
		break;
	case DEV_PM_QOS_MIN_FREQUENCY:
		ret = freq_qos_add_notifier(&dev->power.qos->freq,
					    FREQ_QOS_MIN, notifier);
		break;
	case DEV_PM_QOS_MAX_FREQUENCY:
		ret = freq_qos_add_notifier(&dev->power.qos->freq,
					    FREQ_QOS_MAX, notifier);
		break;
	default:
		WARN_ON(1);
		ret = -EINVAL;
	}

unlock:
	mutex_unlock(&dev_pm_qos_mtx);
	return ret;
}
EXPORT_SYMBOL_GPL(dev_pm_qos_add_notifier);
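
/*
 * Illustrative sketch (hypothetical watcher code): a consumer that wants to
 * react whenever the aggregated resume latency target changes registers a
 * notifier; its callback receives the new target value:
 *
 *	static int my_qos_notify(struct notifier_block *nb,
 *				 unsigned long value, void *data)
 *	{
 *		// "value" is the new aggregated constraint
 *		return NOTIFY_OK;
 *	}
 *
 *	static struct notifier_block my_qos_nb = {
 *		.notifier_call = my_qos_notify,
 *	};
 *
 *	ret = dev_pm_qos_add_notifier(dev, &my_qos_nb,
 *				      DEV_PM_QOS_RESUME_LATENCY);
 *
 * "my_qos_notify" and "my_qos_nb" are made-up names; the notifier is later
 * dropped with dev_pm_qos_remove_notifier().
 */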

/**
 * dev_pm_qos_remove_notifier - deletes notification for changes to target value
 * of per-device PM QoS constraints
 *
 * @dev: target device for the constraint
 * @notifier: notifier block to be removed.
 * @type: request type.
 *
 * Will remove the notifier from the notification chain that gets called
 * upon changes to the target value.
 */
int dev_pm_qos_remove_notifier(struct device *dev,
			       struct notifier_block *notifier,
			       enum dev_pm_qos_req_type type)
{
	int ret = 0;

	mutex_lock(&dev_pm_qos_mtx);

	/* Silently return if the constraints object is not present. */
	if (IS_ERR_OR_NULL(dev->power.qos))
		goto unlock;

	switch (type) {
	case DEV_PM_QOS_RESUME_LATENCY:
		ret = blocking_notifier_chain_unregister(dev->power.qos->resume_latency.notifiers,
							 notifier);
		break;
	case DEV_PM_QOS_MIN_FREQUENCY:
		ret = freq_qos_remove_notifier(&dev->power.qos->freq,
					       FREQ_QOS_MIN, notifier);
		break;
	case DEV_PM_QOS_MAX_FREQUENCY:
		ret = freq_qos_remove_notifier(&dev->power.qos->freq,
					       FREQ_QOS_MAX, notifier);
		break;
	default:
		WARN_ON(1);
		ret = -EINVAL;
	}

unlock:
	mutex_unlock(&dev_pm_qos_mtx);
	return ret;
}
EXPORT_SYMBOL_GPL(dev_pm_qos_remove_notifier);

/**
 * dev_pm_qos_add_ancestor_request - Add PM QoS request for device's ancestor.
 * @dev: Device whose ancestor to add the request for.
 * @req: Pointer to the preallocated handle.
 * @type: Type of the request.
 * @value: Constraint latency value.
 */
int dev_pm_qos_add_ancestor_request(struct device *dev,
				    struct dev_pm_qos_request *req,
				    enum dev_pm_qos_req_type type, s32 value)
{
	struct device *ancestor = dev->parent;
	int ret = -ENODEV;

	switch (type) {
	case DEV_PM_QOS_RESUME_LATENCY:
		while (ancestor && !ancestor->power.ignore_children)
			ancestor = ancestor->parent;

		break;
	case DEV_PM_QOS_LATENCY_TOLERANCE:
		while (ancestor && !ancestor->power.set_latency_tolerance)
			ancestor = ancestor->parent;

		break;
	default:
		ancestor = NULL;
	}
	if (ancestor)
		ret = dev_pm_qos_add_request(ancestor, req, type, value);

	if (ret < 0)
		req->dev = NULL;

	return ret;
}
EXPORT_SYMBOL_GPL(dev_pm_qos_add_ancestor_request);
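
/*
 * Illustrative sketch (hypothetical caller): a device that sits below a
 * controller with ignore_children set can constrain that ancestor instead of
 * itself, e.g. from an I2C client driver:
 *
 *	ret = dev_pm_qos_add_ancestor_request(&client->dev, &my_req,
 *					      DEV_PM_QOS_RESUME_LATENCY, 20);
 *
 * "client", "my_req" and the 20 us value are example placeholders; the
 * request is removed again with dev_pm_qos_remove_request(&my_req).
 */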

static void __dev_pm_qos_drop_user_request(struct device *dev,
					   enum dev_pm_qos_req_type type)
{
	struct dev_pm_qos_request *req = NULL;

	switch (type) {
	case DEV_PM_QOS_RESUME_LATENCY:
		req = dev->power.qos->resume_latency_req;
		dev->power.qos->resume_latency_req = NULL;
		break;
	case DEV_PM_QOS_LATENCY_TOLERANCE:
		req = dev->power.qos->latency_tolerance_req;
		dev->power.qos->latency_tolerance_req = NULL;
		break;
	case DEV_PM_QOS_FLAGS:
		req = dev->power.qos->flags_req;
		dev->power.qos->flags_req = NULL;
		break;
	default:
		WARN_ON(1);
		return;
	}
	__dev_pm_qos_remove_request(req);
	kfree(req);
}

static void dev_pm_qos_drop_user_request(struct device *dev,
					 enum dev_pm_qos_req_type type)
{
	mutex_lock(&dev_pm_qos_mtx);
	__dev_pm_qos_drop_user_request(dev, type);
	mutex_unlock(&dev_pm_qos_mtx);
}

/**
 * dev_pm_qos_expose_latency_limit - Expose PM QoS latency limit to user space.
 * @dev: Device whose PM QoS latency limit is to be exposed to user space.
 * @value: Initial value of the latency limit.
 */
int dev_pm_qos_expose_latency_limit(struct device *dev, s32 value)
{
	struct dev_pm_qos_request *req;
	int ret;

	if (!device_is_registered(dev) || value < 0)
		return -EINVAL;

	req = kzalloc(sizeof(*req), GFP_KERNEL);
	if (!req)
		return -ENOMEM;

	ret = dev_pm_qos_add_request(dev, req, DEV_PM_QOS_RESUME_LATENCY, value);
	if (ret < 0) {
		kfree(req);
		return ret;
	}

	mutex_lock(&dev_pm_qos_sysfs_mtx);

	mutex_lock(&dev_pm_qos_mtx);

	if (IS_ERR_OR_NULL(dev->power.qos))
		ret = -ENODEV;
	else if (dev->power.qos->resume_latency_req)
		ret = -EEXIST;

	if (ret < 0) {
		__dev_pm_qos_remove_request(req);
		kfree(req);
		mutex_unlock(&dev_pm_qos_mtx);
		goto out;
	}
	dev->power.qos->resume_latency_req = req;

	mutex_unlock(&dev_pm_qos_mtx);

	ret = pm_qos_sysfs_add_resume_latency(dev);
	if (ret)
		dev_pm_qos_drop_user_request(dev, DEV_PM_QOS_RESUME_LATENCY);

 out:
	mutex_unlock(&dev_pm_qos_sysfs_mtx);
	return ret;
}
EXPORT_SYMBOL_GPL(dev_pm_qos_expose_latency_limit);
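
/*
 * Illustrative sketch (hypothetical subsystem code): making the limit
 * user-visible when a device is set up and hiding it again on teardown:
 *
 *	dev_pm_qos_expose_latency_limit(dev, initial_limit_us);
 *	...
 *	dev_pm_qos_hide_latency_limit(dev);
 *
 * "initial_limit_us" is a placeholder for whatever non-negative value the
 * caller wants user space to see first; the exposed attribute appears in the
 * device's power/ directory in sysfs.
 */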
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 745)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 746) static void __dev_pm_qos_hide_latency_limit(struct device *dev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 747) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 748) if (!IS_ERR_OR_NULL(dev->power.qos) && dev->power.qos->resume_latency_req)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 749) __dev_pm_qos_drop_user_request(dev, DEV_PM_QOS_RESUME_LATENCY);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 750) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 751)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 752) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 753) * dev_pm_qos_hide_latency_limit - Hide PM QoS latency limit from user space.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 754) * @dev: Device whose PM QoS latency limit is to be hidden from user space.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 755) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 756) void dev_pm_qos_hide_latency_limit(struct device *dev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 757) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 758) mutex_lock(&dev_pm_qos_sysfs_mtx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 759)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 760) pm_qos_sysfs_remove_resume_latency(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 761)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 762) mutex_lock(&dev_pm_qos_mtx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 763) __dev_pm_qos_hide_latency_limit(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 764) mutex_unlock(&dev_pm_qos_mtx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 765)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 766) mutex_unlock(&dev_pm_qos_sysfs_mtx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 767) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 768) EXPORT_SYMBOL_GPL(dev_pm_qos_hide_latency_limit);

/**
 * dev_pm_qos_expose_flags - Expose PM QoS flags of a device to user space.
 * @dev: Device whose PM QoS flags are to be exposed to user space.
 * @val: Initial values of the flags.
 */
int dev_pm_qos_expose_flags(struct device *dev, s32 val)
{
	struct dev_pm_qos_request *req;
	int ret;

	if (!device_is_registered(dev))
		return -EINVAL;

	req = kzalloc(sizeof(*req), GFP_KERNEL);
	if (!req)
		return -ENOMEM;

	ret = dev_pm_qos_add_request(dev, req, DEV_PM_QOS_FLAGS, val);
	if (ret < 0) {
		kfree(req);
		return ret;
	}

	pm_runtime_get_sync(dev);
	mutex_lock(&dev_pm_qos_sysfs_mtx);

	mutex_lock(&dev_pm_qos_mtx);

	if (IS_ERR_OR_NULL(dev->power.qos))
		ret = -ENODEV;
	else if (dev->power.qos->flags_req)
		ret = -EEXIST;

	if (ret < 0) {
		__dev_pm_qos_remove_request(req);
		kfree(req);
		mutex_unlock(&dev_pm_qos_mtx);
		goto out;
	}
	dev->power.qos->flags_req = req;

	mutex_unlock(&dev_pm_qos_mtx);

	ret = pm_qos_sysfs_add_flags(dev);
	if (ret)
		dev_pm_qos_drop_user_request(dev, DEV_PM_QOS_FLAGS);

 out:
	mutex_unlock(&dev_pm_qos_sysfs_mtx);
	pm_runtime_put(dev);
	return ret;
}
EXPORT_SYMBOL_GPL(dev_pm_qos_expose_flags);
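
/*
 * Usage sketch (illustrative only): a caller that wants user space to be able
 * to forbid powering the device off would expose the flags once the device is
 * registered, e.g.:
 *
 *	ret = dev_pm_qos_expose_flags(dev, PM_QOS_FLAG_NO_POWER_OFF);
 *	if (ret)
 *		dev_warn(dev, "cannot expose PM QoS flags: %d\n", ret);
 *
 * and undo it with dev_pm_qos_hide_flags() before the device goes away.  The
 * initial value only seeds the request; user space can change it afterwards
 * through the sysfs attribute backed by this request.
 */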

static void __dev_pm_qos_hide_flags(struct device *dev)
{
	if (!IS_ERR_OR_NULL(dev->power.qos) && dev->power.qos->flags_req)
		__dev_pm_qos_drop_user_request(dev, DEV_PM_QOS_FLAGS);
}

/**
 * dev_pm_qos_hide_flags - Hide PM QoS flags of a device from user space.
 * @dev: Device whose PM QoS flags are to be hidden from user space.
 */
void dev_pm_qos_hide_flags(struct device *dev)
{
	pm_runtime_get_sync(dev);
	mutex_lock(&dev_pm_qos_sysfs_mtx);

	pm_qos_sysfs_remove_flags(dev);

	mutex_lock(&dev_pm_qos_mtx);
	__dev_pm_qos_hide_flags(dev);
	mutex_unlock(&dev_pm_qos_mtx);

	mutex_unlock(&dev_pm_qos_sysfs_mtx);
	pm_runtime_put(dev);
}
EXPORT_SYMBOL_GPL(dev_pm_qos_hide_flags);

/**
 * dev_pm_qos_update_flags - Update PM QoS flags request owned by user space.
 * @dev: Device to update the PM QoS flags request for.
 * @mask: Flags to set/clear.
 * @set: Whether to set or clear the flags (true means set).
 */
int dev_pm_qos_update_flags(struct device *dev, s32 mask, bool set)
{
	s32 value;
	int ret;

	pm_runtime_get_sync(dev);
	mutex_lock(&dev_pm_qos_mtx);

	if (IS_ERR_OR_NULL(dev->power.qos) || !dev->power.qos->flags_req) {
		ret = -EINVAL;
		goto out;
	}

	value = dev_pm_qos_requested_flags(dev);
	if (set)
		value |= mask;
	else
		value &= ~mask;

	ret = __dev_pm_qos_update_request(dev->power.qos->flags_req, value);

 out:
	mutex_unlock(&dev_pm_qos_mtx);
	pm_runtime_put(dev);
	return ret;
}
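
/*
 * Usage sketch: conceptually, the sysfs store path (or any other caller
 * acting on behalf of user space) toggles individual flag bits like this
 * (illustrative only):
 *
 *	dev_pm_qos_update_flags(dev, PM_QOS_FLAG_NO_POWER_OFF, true);
 *	...
 *	dev_pm_qos_update_flags(dev, PM_QOS_FLAG_NO_POWER_OFF, false);
 *
 * Both calls fail with -EINVAL unless dev_pm_qos_expose_flags() has been
 * called for the device first, because only then does a flags request owned
 * by user space exist to be updated.
 */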

/**
 * dev_pm_qos_get_user_latency_tolerance - Get user space latency tolerance.
 * @dev: Device to obtain the user space latency tolerance for.
 */
s32 dev_pm_qos_get_user_latency_tolerance(struct device *dev)
{
	s32 ret;

	mutex_lock(&dev_pm_qos_mtx);
	ret = IS_ERR_OR_NULL(dev->power.qos)
		|| !dev->power.qos->latency_tolerance_req ?
			PM_QOS_LATENCY_TOLERANCE_NO_CONSTRAINT :
			dev->power.qos->latency_tolerance_req->data.pnode.prio;
	mutex_unlock(&dev_pm_qos_mtx);
	return ret;
}
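
/*
 * Usage sketch (illustrative; FOO_DEFAULT_TOLERANCE is hypothetical): kernel
 * code that wants to honor the value user space asked for can read it back
 * and fall back to its own default when no request exists:
 *
 *	s32 tol = dev_pm_qos_get_user_latency_tolerance(dev);
 *
 *	if (tol == PM_QOS_LATENCY_TOLERANCE_NO_CONSTRAINT)
 *		tol = FOO_DEFAULT_TOLERANCE;
 *
 * The value returned is the raw priority stored in the request, i.e.
 * whatever was last passed to dev_pm_qos_update_user_latency_tolerance().
 */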

/**
 * dev_pm_qos_update_user_latency_tolerance - Update user space latency tolerance.
 * @dev: Device to update the user space latency tolerance for.
 * @val: New user space latency tolerance for @dev (negative values disable).
 */
int dev_pm_qos_update_user_latency_tolerance(struct device *dev, s32 val)
{
	int ret;

	mutex_lock(&dev_pm_qos_mtx);

	if (IS_ERR_OR_NULL(dev->power.qos)
	    || !dev->power.qos->latency_tolerance_req) {
		struct dev_pm_qos_request *req;

		if (val < 0) {
			if (val == PM_QOS_LATENCY_TOLERANCE_NO_CONSTRAINT)
				ret = 0;
			else
				ret = -EINVAL;
			goto out;
		}
		req = kzalloc(sizeof(*req), GFP_KERNEL);
		if (!req) {
			ret = -ENOMEM;
			goto out;
		}
		ret = __dev_pm_qos_add_request(dev, req, DEV_PM_QOS_LATENCY_TOLERANCE, val);
		if (ret < 0) {
			kfree(req);
			goto out;
		}
		dev->power.qos->latency_tolerance_req = req;
	} else {
		if (val < 0) {
			__dev_pm_qos_drop_user_request(dev, DEV_PM_QOS_LATENCY_TOLERANCE);
			ret = 0;
		} else {
			ret = __dev_pm_qos_update_request(dev->power.qos->latency_tolerance_req, val);
		}
	}

 out:
	mutex_unlock(&dev_pm_qos_mtx);
	return ret;
}
EXPORT_SYMBOL_GPL(dev_pm_qos_update_user_latency_tolerance);
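
/*
 * Usage sketch (illustrative): the sysfs interface is the expected caller,
 * but the semantics are easiest to see as plain calls:
 *
 *	Set (or create) the user space tolerance request:
 *		ret = dev_pm_qos_update_user_latency_tolerance(dev, 100);
 *
 *	Drop the request again:
 *		dev_pm_qos_update_user_latency_tolerance(dev,
 *				PM_QOS_LATENCY_TOLERANCE_NO_CONSTRAINT);
 *
 * When no request exists yet, any negative value other than
 * PM_QOS_LATENCY_TOLERANCE_NO_CONSTRAINT is rejected with -EINVAL, while
 * NO_CONSTRAINT is accepted and simply leaves the device without a user
 * space request.
 */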

/**
 * dev_pm_qos_expose_latency_tolerance - Expose latency tolerance to user space.
 * @dev: Device whose latency tolerance is to be exposed to user space.
 */
int dev_pm_qos_expose_latency_tolerance(struct device *dev)
{
	int ret;

	if (!dev->power.set_latency_tolerance)
		return -EINVAL;

	mutex_lock(&dev_pm_qos_sysfs_mtx);
	ret = pm_qos_sysfs_add_latency_tolerance(dev);
	mutex_unlock(&dev_pm_qos_sysfs_mtx);

	return ret;
}
EXPORT_SYMBOL_GPL(dev_pm_qos_expose_latency_tolerance);
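
/*
 * Usage sketch (names are hypothetical): only devices that can actually act
 * on a latency tolerance value may expose it, so the caller is expected to
 * install a set_latency_tolerance() callback first:
 *
 *	static void foo_set_latency_tolerance(struct device *dev, s32 val)
 *	{
 *		... program the hardware latency-tolerance setting from val ...
 *	}
 *
 *	...
 *	dev->power.set_latency_tolerance = foo_set_latency_tolerance;
 *	ret = dev_pm_qos_expose_latency_tolerance(dev);
 *
 * Without the callback the function returns -EINVAL and no sysfs attribute
 * is created.
 */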

/**
 * dev_pm_qos_hide_latency_tolerance - Hide latency tolerance from user space.
 * @dev: Device whose latency tolerance is to be hidden from user space.
 */
void dev_pm_qos_hide_latency_tolerance(struct device *dev)
{
	mutex_lock(&dev_pm_qos_sysfs_mtx);
	pm_qos_sysfs_remove_latency_tolerance(dev);
	mutex_unlock(&dev_pm_qos_sysfs_mtx);

	/* Remove the user space request now. */
	pm_runtime_get_sync(dev);
	dev_pm_qos_update_user_latency_tolerance(dev,
						 PM_QOS_LATENCY_TOLERANCE_NO_CONSTRAINT);
	pm_runtime_put(dev);
}
EXPORT_SYMBOL_GPL(dev_pm_qos_hide_latency_tolerance);