// SPDX-License-Identifier: GPL-2.0-only
/*
 * ARM/ARM64 generic CPU idle driver.
 *
 * Copyright (C) 2014 ARM Ltd.
 * Author: Lorenzo Pieralisi <lorenzo.pieralisi@arm.com>
 */

#define pr_fmt(fmt) "CPUidle arm: " fmt

#include <linux/cpu_cooling.h>
#include <linux/cpuidle.h>
#include <linux/cpumask.h>
#include <linux/cpu_pm.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/slab.h>

#include <asm/cpuidle.h>

#include "dt_idle_states.h"

/*
 * arm_enter_idle_state - Programs CPU to enter the specified state
 *
 * dev: cpuidle device
 * drv: cpuidle driver
 * idx: state index
 *
 * Called from the CPUidle framework to program the device to the
 * specified target state selected by the governor.
 */
static int arm_enter_idle_state(struct cpuidle_device *dev,
				struct cpuidle_driver *drv, int idx)
{
	/*
	 * Pass the idle state index to arm_cpuidle_suspend(), which in
	 * turn will call the CPU ops suspend protocol with the idle
	 * index as a parameter.
	 */
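	/*
	 * Note that for state index 0 the CPU_PM_CPU_IDLE_ENTER() helper
	 * simply executes wfi, while deeper states have the suspend call
	 * bracketed by cpu_pm_enter()/cpu_pm_exit() notifications.
	 */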
	return CPU_PM_CPU_IDLE_ENTER(arm_cpuidle_suspend, idx);
}

static struct cpuidle_driver arm_idle_driver __initdata = {
	.name = "arm_idle",
	.owner = THIS_MODULE,
	/*
	 * The state at index 0 is standby wfi and considered standard
	 * on all ARM platforms. If on some platforms a simple wfi
	 * can't be used as "state 0", DT bindings must be implemented
	 * to work around this issue and allow installing a special
	 * handler for idle state index 0.
	 */
	.states[0] = {
		.enter			= arm_enter_idle_state,
		.exit_latency		= 1,
		.target_residency	= 1,
		.power_usage		= UINT_MAX,
		.name			= "WFI",
		.desc			= "ARM WFI",
	}
};

static const struct of_device_id arm_idle_state_match[] __initconst = {
	{ .compatible = "arm,idle-state",
	  .data = arm_enter_idle_state },
	{ },
};

/*
 * arm_idle_init_cpu
 *
 * Registers the arm specific cpuidle driver with the cpuidle
 * framework. It relies on core code to parse the idle states
 * and initialize them using driver data structures accordingly.
 */
static int __init arm_idle_init_cpu(int cpu)
{
	int ret;
	struct cpuidle_driver *drv;

	drv = kmemdup(&arm_idle_driver, sizeof(*drv), GFP_KERNEL);
	if (!drv)
		return -ENOMEM;

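	/*
	 * Each CPU gets its own copy of the driver, so restrict the
	 * driver to the current CPU only. The cast drops the const
	 * qualifier of cpumask_of(); the cpuidle core does not modify
	 * the mask.
	 */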
	drv->cpumask = (struct cpumask *)cpumask_of(cpu);

	/*
	 * Initialize idle states data, starting at index 1. This
	 * driver is DT only; if no DT idle states are detected (ret
	 * == 0), let the driver initialization fail accordingly, since
	 * there is no reason to initialize the idle driver if only
	 * wfi is supported.
	 */
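	/*
	 * A positive return value is the number of DT idle states that
	 * were parsed on top of the built-in WFI state at index 0.
	 */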
	ret = dt_init_idle_driver(drv, arm_idle_state_match, 1);
	if (ret <= 0) {
		ret = ret ? : -ENODEV;
		goto out_kfree_drv;
	}

	/*
	 * Call arch CPU operations in order to initialize
	 * idle state suspend back-end specific data.
	 */
	ret = arm_cpuidle_init(cpu);

	/*
	 * Allow the initialization to continue for other CPUs if the
	 * reported failure is a HW misconfiguration/breakage (-ENXIO).
	 *
	 * Some platforms do not support idle operations
	 * (arm_cpuidle_init() returns -EOPNOTSUPP); we should not flag
	 * this case as an error, as it is a valid configuration.
	 */
	if (ret) {
		if (ret != -EOPNOTSUPP)
			pr_err("CPU %d failed to init idle CPU ops\n", cpu);
		ret = ret == -ENXIO ? 0 : ret;
		goto out_kfree_drv;
	}

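	/*
	 * Register the driver and a cpuidle device for this CPU; the
	 * NULL second argument means no coupled idle states are used.
	 */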
	ret = cpuidle_register(drv, NULL);
	if (ret)
		goto out_kfree_drv;

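	/*
	 * Expose the idle states to the thermal framework as an idle
	 * injection cooling device (a no-op when idle cooling support
	 * is not built in).
	 */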
	cpuidle_cooling_register(drv);

	return 0;

out_kfree_drv:
	kfree(drv);
	return ret;
}

/*
 * arm_idle_init - Initializes arm cpuidle driver
 *
 * Initializes the arm cpuidle driver for all CPUs. If any CPU fails
 * to register the cpuidle driver, roll back and cancel the
 * registration of all CPUs.
 */
static int __init arm_idle_init(void)
{
	int cpu, ret;
	struct cpuidle_driver *drv;
	struct cpuidle_device *dev;

	for_each_possible_cpu(cpu) {
		ret = arm_idle_init_cpu(cpu);
		if (ret)
			goto out_fail;
	}

	return 0;

out_fail:
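	/*
	 * Unwind in reverse order: unregister and free the per-CPU
	 * driver of every CPU that was initialized before the failure.
	 */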
	while (--cpu >= 0) {
		dev = per_cpu(cpuidle_devices, cpu);
		drv = cpuidle_get_cpu_driver(dev);
		cpuidle_unregister(drv);
		kfree(drv);
	}

	return ret;
}
device_initcall(arm_idle_init);