// SPDX-License-Identifier: GPL-2.0-only
/*
 * DT idle states parsing code.
 *
 * Copyright (C) 2014 ARM Ltd.
 * Author: Lorenzo Pieralisi <lorenzo.pieralisi@arm.com>
 */

#define pr_fmt(fmt) "DT idle-states: " fmt

#include <linux/cpuidle.h>
#include <linux/cpumask.h>
#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/of_device.h>

#include "dt_idle_states.h"

static int init_state_node(struct cpuidle_state *idle_state,
			   const struct of_device_id *match_id,
			   struct device_node *state_node)
{
	int err;
	const char *desc;

	/*
	 * CPUidle drivers are expected to initialize the const void *data
	 * pointer of the passed in struct of_device_id array to the idle
	 * state enter function.
	 */
	idle_state->enter = match_id->data;
	/*
	 * Since this is not a "coupled" state, it's safe to assume interrupts
	 * won't be enabled when it exits, allowing the tick to be frozen
	 * safely. So enter() can also be used as the enter_s2idle() callback.
	 */
	idle_state->enter_s2idle = match_id->data;

	err = of_property_read_u32(state_node, "wakeup-latency-us",
				   &idle_state->exit_latency);
	if (err) {
		u32 entry_latency, exit_latency;

		err = of_property_read_u32(state_node, "entry-latency-us",
					   &entry_latency);
		if (err) {
			pr_debug(" * %pOF missing entry-latency-us property\n",
				 state_node);
			return -EINVAL;
		}

		err = of_property_read_u32(state_node, "exit-latency-us",
					   &exit_latency);
		if (err) {
			pr_debug(" * %pOF missing exit-latency-us property\n",
				 state_node);
			return -EINVAL;
		}
		/*
		 * If wakeup-latency-us is missing, default to entry+exit
		 * latencies as defined in idle states bindings
		 */
		idle_state->exit_latency = entry_latency + exit_latency;
	}

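	/* The DT "min-residency-us" property maps to target_residency (in us). */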
	err = of_property_read_u32(state_node, "min-residency-us",
				   &idle_state->target_residency);
	if (err) {
		pr_debug(" * %pOF missing min-residency-us property\n",
			 state_node);
		return -EINVAL;
	}

	err = of_property_read_string(state_node, "idle-state-name", &desc);
	if (err)
		desc = state_node->name;

	idle_state->flags = 0;
	if (of_property_read_bool(state_node, "local-timer-stop"))
		idle_state->flags |= CPUIDLE_FLAG_TIMER_STOP;
	/*
	 * TODO:
	 *	replace with kstrdup and pointer assignment when name
	 *	and desc become string pointers
	 */
	strncpy(idle_state->name, state_node->name, CPUIDLE_NAME_LEN - 1);
	strncpy(idle_state->desc, desc, CPUIDLE_DESC_LEN - 1);
	return 0;
}

/*
 * Check that the idle state is uniform across all CPUs in the CPUidle driver
 * cpumask
 */
static bool idle_state_valid(struct device_node *state_node, unsigned int idx,
			     const cpumask_t *cpumask)
{
	int cpu;
	struct device_node *cpu_node, *curr_state_node;
	bool valid = true;

	/*
	 * Compare idle state phandles for index idx on all CPUs in the
	 * CPUidle driver cpumask. Start from next logical cpu following
	 * cpumask_first(cpumask) since that's the CPU state_node was
	 * retrieved from. If a mismatch is found bail out straight
	 * away since we certainly hit a firmware misconfiguration.
	 */
	for (cpu = cpumask_next(cpumask_first(cpumask), cpumask);
	     cpu < nr_cpu_ids; cpu = cpumask_next(cpu, cpumask)) {
		cpu_node = of_cpu_device_node_get(cpu);
		curr_state_node = of_get_cpu_state_node(cpu_node, idx);
		if (state_node != curr_state_node)
			valid = false;

		of_node_put(curr_state_node);
		of_node_put(cpu_node);
		if (!valid)
			break;
	}

	return valid;
}

/**
 * dt_init_idle_driver() - Parse the DT idle states and initialize the
 *			   idle driver states array
 * @drv:	Pointer to CPU idle driver to be initialized
 * @matches:	Array of of_device_id match structures to search in for
 *		compatible idle state nodes. The data pointer for each valid
 *		struct of_device_id entry in the matches array must point to
 *		a function with the following signature, that corresponds to
 *		the CPUidle state enter function signature:
 *
 *		int (*)(struct cpuidle_device *dev,
 *			struct cpuidle_driver *drv,
 *			int index);
 *
 * @start_idx:	First idle state index to be initialized
 *
 * If DT idle states are detected and are valid the state count and states
 * array entries in the cpuidle driver are initialized accordingly starting
 * from index start_idx.
 *
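 * As an illustration only (the names below are hypothetical, not part of this
 * file), a platform CPUidle driver would typically declare a matches table
 * whose .data points at its enter function and then let this helper fill in
 * its states array:
 *
 *	static int foo_enter_idle(struct cpuidle_device *dev,
 *				  struct cpuidle_driver *drv, int index);
 *
 *	static const struct of_device_id foo_idle_state_match[] = {
 *		{ .compatible = "foo,idle-state", .data = foo_enter_idle },
 *		{ },
 *	};
 *
 *	ret = dt_init_idle_driver(drv, foo_idle_state_match, 1);
 *	if (ret <= 0)
 *		return ret ? : -ENODEV;
 *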
 * Return: number of valid DT idle states parsed, <0 on failure
 */
int dt_init_idle_driver(struct cpuidle_driver *drv,
			const struct of_device_id *matches,
			unsigned int start_idx)
{
	struct cpuidle_state *idle_state;
	struct device_node *state_node, *cpu_node;
	const struct of_device_id *match_id;
	int i, err = 0;
	const cpumask_t *cpumask;
	unsigned int state_idx = start_idx;

	if (state_idx >= CPUIDLE_STATE_MAX)
		return -EINVAL;
	/*
	 * We get the idle states for the first logical cpu in the
	 * driver mask (or cpu_possible_mask if the driver cpumask is not set)
	 * and we check through idle_state_valid() if they are uniform
	 * across CPUs, otherwise we hit a firmware misconfiguration.
	 */
	cpumask = drv->cpumask ? : cpu_possible_mask;
	cpu_node = of_cpu_device_node_get(cpumask_first(cpumask));

	for (i = 0; ; i++) {
		state_node = of_get_cpu_state_node(cpu_node, i);
		if (!state_node)
			break;

		match_id = of_match_node(matches, state_node);
		if (!match_id) {
			err = -ENODEV;
			break;
		}

		if (!of_device_is_available(state_node)) {
			of_node_put(state_node);
			continue;
		}

		if (!idle_state_valid(state_node, i, cpumask)) {
			pr_warn("%pOF idle state not valid, bailing out\n",
				state_node);
			err = -EINVAL;
			break;
		}

		if (state_idx == CPUIDLE_STATE_MAX) {
			pr_warn("State index reached static CPU idle driver states array size\n");
			break;
		}

		idle_state = &drv->states[state_idx++];
		err = init_state_node(idle_state, match_id, state_node);
		if (err) {
			pr_err("Parsing idle state node %pOF failed with err %d\n",
			       state_node, err);
			err = -EINVAL;
			break;
		}
		of_node_put(state_node);
	}

	of_node_put(state_node);
	of_node_put(cpu_node);
	if (err)
		return err;
	/*
	 * Update the driver state count only if some valid DT idle states
	 * were detected
	 */
	if (i)
		drv->state_count = state_idx;

	/*
	 * Return the number of present and valid DT idle states, which can
	 * also be 0 on platforms with missing DT idle states or legacy DT
	 * configuration predating the DT idle states bindings.
	 */
	return i;
}
EXPORT_SYMBOL_GPL(dt_init_idle_driver);