// SPDX-License-Identifier: GPL-2.0-only
/*
 * OMAP4+ CPU idle Routines
 *
 * Copyright (C) 2011-2013 Texas Instruments, Inc.
 * Santosh Shilimkar <santosh.shilimkar@ti.com>
 * Rajendra Nayak <rnayak@ti.com>
 */

#include <linux/sched.h>
#include <linux/cpuidle.h>
#include <linux/cpu_pm.h>
#include <linux/export.h>
#include <linux/tick.h>

#include <asm/cpuidle.h>

#include "common.h"
#include "pm.h"
#include "prm.h"
#include "soc.h"
#include "clockdomain.h"

#define MAX_CPUS	2

/**
 * struct idle_statedata - machine specific idle state data
 * @cpu_state:       power state to program for the CPU power domain
 * @mpu_logic_state: logic state to program for the MPU power domain
 * @mpu_state:       power state to program for the MPU power domain
 * @mpu_state_vote:  number of online CPUs currently voting for @mpu_state
 *                   (used only by omap_enter_idle_smp())
 */
struct idle_statedata {
	u32 cpu_state;
	u32 mpu_logic_state;
	u32 mpu_state;
	u32 mpu_state_vote;
};

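/*
 * The tables below must keep their entries in the same order as the
 * states in omap4_idle_driver/omap5_idle_driver: the enter callbacks
 * index the active table with state_ptr + index, and each driver takes
 * its state_count from ARRAY_SIZE() of the matching table.
 */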
static struct idle_statedata omap4_idle_data[] = {
	{
		.cpu_state = PWRDM_POWER_ON,
		.mpu_state = PWRDM_POWER_ON,
		.mpu_logic_state = PWRDM_POWER_RET,
	},
	{
		.cpu_state = PWRDM_POWER_OFF,
		.mpu_state = PWRDM_POWER_RET,
		.mpu_logic_state = PWRDM_POWER_RET,
	},
	{
		.cpu_state = PWRDM_POWER_OFF,
		.mpu_state = PWRDM_POWER_RET,
		.mpu_logic_state = PWRDM_POWER_OFF,
	},
};

static struct idle_statedata omap5_idle_data[] = {
	{
		.cpu_state = PWRDM_POWER_ON,
		.mpu_state = PWRDM_POWER_ON,
		.mpu_logic_state = PWRDM_POWER_ON,
	},
	{
		.cpu_state = PWRDM_POWER_RET,
		.mpu_state = PWRDM_POWER_RET,
		.mpu_logic_state = PWRDM_POWER_RET,
	},
};

static struct powerdomain *mpu_pd, *cpu_pd[MAX_CPUS];
static struct clockdomain *cpu_clkdm[MAX_CPUS];

static atomic_t abort_barrier;
static bool cpu_done[MAX_CPUS];
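/*
 * state_ptr selects the idle state table for the running SoC (set in
 * omap4_idle_init()); mpu_lock serializes the MPU power state voting
 * done in omap_enter_idle_smp().
 */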
static struct idle_statedata *state_ptr = &omap4_idle_data[0];
static DEFINE_RAW_SPINLOCK(mpu_lock);

/* Private functions */

/**
 * omap_enter_idle_[simple/coupled] - OMAP4+ cpuidle entry functions
 * @dev: cpuidle device
 * @drv: cpuidle driver
 * @index: the index of state to be entered
 *
 * Called from the CPUidle framework to program the device to the
 * specified low power state selected by the governor.
 * Returns the index of the state entered.
 */
static int omap_enter_idle_simple(struct cpuidle_device *dev,
			struct cpuidle_driver *drv,
			int index)
{
	omap_do_wfi();
	return index;
}

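/*
 * omap_enter_idle_smp() backs the OMAP5 C2 state, where each CPU can
 * enter CSWR on its own. Every CPU votes for the MPU power state under
 * mpu_lock; the MPU power domain is only programmed once all online
 * CPUs have voted, and the first CPU to wake restores it to ON before
 * dropping its vote.
 */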
static int omap_enter_idle_smp(struct cpuidle_device *dev,
			struct cpuidle_driver *drv,
			int index)
{
	struct idle_statedata *cx = state_ptr + index;
	unsigned long flag;

	raw_spin_lock_irqsave(&mpu_lock, flag);
	cx->mpu_state_vote++;
	if (cx->mpu_state_vote == num_online_cpus()) {
		pwrdm_set_logic_retst(mpu_pd, cx->mpu_logic_state);
		omap_set_pwrdm_state(mpu_pd, cx->mpu_state);
	}
	raw_spin_unlock_irqrestore(&mpu_lock, flag);

	omap4_enter_lowpower(dev->cpu, cx->cpu_state);

	raw_spin_lock_irqsave(&mpu_lock, flag);
	if (cx->mpu_state_vote == num_online_cpus())
		omap_set_pwrdm_state(mpu_pd, PWRDM_POWER_ON);
	cx->mpu_state_vote--;
	raw_spin_unlock_irqrestore(&mpu_lock, flag);

	return index;
}

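/*
 * omap_enter_idle_coupled() backs the OMAP4 C2/C3 states, which are
 * flagged CPUIDLE_FLAG_COUPLED: both CPUs enter and leave the state
 * together. CPU0 programs the MPUSS power state and drives the cluster
 * PM notifiers when the MPUSS can lose context (OSWR).
 */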
static int omap_enter_idle_coupled(struct cpuidle_device *dev,
			struct cpuidle_driver *drv,
			int index)
{
	struct idle_statedata *cx = state_ptr + index;
	u32 mpuss_can_lose_context = 0;
	int error;

	/*
	 * CPU0 has to wait and stay ON until CPU1 is in OFF state.
	 * This is necessary to honour the hardware recommendation
	 * of triggering all the possible low power modes only once
	 * CPU1 is out of coherency and in OFF mode.
	 */
	if (dev->cpu == 0 && cpumask_test_cpu(1, cpu_online_mask)) {
		while (pwrdm_read_pwrst(cpu_pd[1]) != PWRDM_POWER_OFF) {
			cpu_relax();

			/*
			 * CPU1 could have already entered & exited idle
			 * without hitting off because of a wakeup
			 * or a failed attempt to hit off mode. Check for
			 * that here, otherwise we could spin forever
			 * waiting for CPU1 off.
			 */
			if (cpu_done[1])
				goto fail;

		}
	}

	mpuss_can_lose_context = (cx->mpu_state == PWRDM_POWER_RET) &&
				 (cx->mpu_logic_state == PWRDM_POWER_OFF);

	/* Enter broadcast mode for periodic timers */
	RCU_NONIDLE(tick_broadcast_enable());

	/* Enter broadcast mode for one-shot timers */
	RCU_NONIDLE(tick_broadcast_enter());

	/*
	 * Call idle CPU PM enter notifier chain so that
	 * VFP and per CPU interrupt context is saved.
	 */
	error = cpu_pm_enter();
	if (error)
		goto cpu_pm_out;

	if (dev->cpu == 0) {
		pwrdm_set_logic_retst(mpu_pd, cx->mpu_logic_state);
		RCU_NONIDLE(omap_set_pwrdm_state(mpu_pd, cx->mpu_state));

		/*
		 * Call idle CPU cluster PM enter notifier chain
		 * to save GIC and wakeupgen context.
		 */
		if (mpuss_can_lose_context) {
			error = cpu_cluster_pm_enter();
			if (error) {
				index = 0;
				cx = state_ptr + index;
				pwrdm_set_logic_retst(mpu_pd, cx->mpu_logic_state);
				RCU_NONIDLE(omap_set_pwrdm_state(mpu_pd, cx->mpu_state));
				mpuss_can_lose_context = 0;
			}
		}
	}

	omap4_enter_lowpower(dev->cpu, cx->cpu_state);
	cpu_done[dev->cpu] = true;

	/* Wake up CPU1 only if it is not offlined */
	if (dev->cpu == 0 && cpumask_test_cpu(1, cpu_online_mask)) {

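		/*
		 * PM_OMAP4_ROM_SMP_BOOT_ERRATUM_GICD workaround: when the
		 * MPUSS may have lost context, disable the GIC distributor
		 * before waking CPU1, wait until it has been re-enabled
		 * again, and then retrigger the local timer in case its
		 * interrupt was missed while the distributor was off.
		 */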
		if (IS_PM44XX_ERRATUM(PM_OMAP4_ROM_SMP_BOOT_ERRATUM_GICD) &&
		    mpuss_can_lose_context)
			gic_dist_disable();

		RCU_NONIDLE(clkdm_deny_idle(cpu_clkdm[1]));
		RCU_NONIDLE(omap_set_pwrdm_state(cpu_pd[1], PWRDM_POWER_ON));
		RCU_NONIDLE(clkdm_allow_idle(cpu_clkdm[1]));

		if (IS_PM44XX_ERRATUM(PM_OMAP4_ROM_SMP_BOOT_ERRATUM_GICD) &&
		    mpuss_can_lose_context) {
			while (gic_dist_disabled()) {
				udelay(1);
				cpu_relax();
			}
			gic_timer_retrigger();
		}
	}

	/*
	 * Call idle CPU cluster PM exit notifier chain
	 * to restore GIC and wakeupgen context.
	 */
	if (dev->cpu == 0 && mpuss_can_lose_context)
		cpu_cluster_pm_exit();

	/*
	 * Call idle CPU PM exit notifier chain to restore
	 * VFP and per CPU IRQ context.
	 */
	cpu_pm_exit();

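	/*
	 * cpu_pm_out is taken when cpu_pm_enter() fails; fail is taken when
	 * CPU0 finds that CPU1 has already finished its own idle attempt.
	 * Both paths still synchronize at the coupled barrier below.
	 */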
cpu_pm_out:
	RCU_NONIDLE(tick_broadcast_exit());

fail:
	cpuidle_coupled_parallel_barrier(dev, &abort_barrier);
	cpu_done[dev->cpu] = false;

	return index;
}

static struct cpuidle_driver omap4_idle_driver = {
	.name = "omap4_idle",
	.owner = THIS_MODULE,
	.states = {
		{
			/* C1 - CPU0 ON + CPU1 ON + MPU ON */
			.exit_latency = 2 + 2,
			.target_residency = 5,
			.enter = omap_enter_idle_simple,
			.name = "C1",
			.desc = "CPUx ON, MPUSS ON"
		},
		{
			/* C2 - CPU0 OFF + CPU1 OFF + MPU CSWR */
			.exit_latency = 328 + 440,
			.target_residency = 960,
			.flags = CPUIDLE_FLAG_COUPLED,
			.enter = omap_enter_idle_coupled,
			.name = "C2",
			.desc = "CPUx OFF, MPUSS CSWR",
		},
		{
			/* C3 - CPU0 OFF + CPU1 OFF + MPU OSWR */
			.exit_latency = 460 + 518,
			.target_residency = 1100,
			.flags = CPUIDLE_FLAG_COUPLED,
			.enter = omap_enter_idle_coupled,
			.name = "C3",
			.desc = "CPUx OFF, MPUSS OSWR",
		},
	},
	.state_count = ARRAY_SIZE(omap4_idle_data),
	.safe_state_index = 0,
};

static struct cpuidle_driver omap5_idle_driver = {
	.name = "omap5_idle",
	.owner = THIS_MODULE,
	.states = {
		{
			/* C1 - CPU0 ON + CPU1 ON + MPU ON */
			.exit_latency = 2 + 2,
			.target_residency = 5,
			.enter = omap_enter_idle_simple,
			.name = "C1",
			.desc = "CPUx WFI, MPUSS ON"
		},
		{
			/* C2 - CPU0 RET + CPU1 RET + MPU CSWR */
			.exit_latency = 48 + 60,
			.target_residency = 100,
			.flags = CPUIDLE_FLAG_TIMER_STOP,
			.enter = omap_enter_idle_smp,
			.name = "C2",
			.desc = "CPUx CSWR, MPUSS CSWR",
		},
	},
	.state_count = ARRAY_SIZE(omap5_idle_data),
	.safe_state_index = 0,
};

/* Public functions */

/**
 * omap4_idle_init - Init routine for OMAP4+ idle
 *
 * Registers the OMAP4+ specific cpuidle driver to the cpuidle
 * framework with the valid set of states.
 */
int __init omap4_idle_init(void)
{
	struct cpuidle_driver *idle_driver;

	if (soc_is_omap54xx()) {
		state_ptr = &omap5_idle_data[0];
		idle_driver = &omap5_idle_driver;
	} else {
		state_ptr = &omap4_idle_data[0];
		idle_driver = &omap4_idle_driver;
	}

	mpu_pd = pwrdm_lookup("mpu_pwrdm");
	cpu_pd[0] = pwrdm_lookup("cpu0_pwrdm");
	cpu_pd[1] = pwrdm_lookup("cpu1_pwrdm");
	if (!mpu_pd || !cpu_pd[0] || !cpu_pd[1])
		return -ENODEV;

	cpu_clkdm[0] = clkdm_lookup("mpu0_clkdm");
	cpu_clkdm[1] = clkdm_lookup("mpu1_clkdm");
	if (!cpu_clkdm[0] || !cpu_clkdm[1])
		return -ENODEV;

	return cpuidle_register(idle_driver, cpu_online_mask);
}