// SPDX-License-Identifier: GPL-2.0-only
/*
 * OMAP MPUSS low power code
 *
 * Copyright (C) 2011 Texas Instruments, Inc.
 * Santosh Shilimkar <santosh.shilimkar@ti.com>
 *
 * OMAP4430 MPUSS mainly consists of a dual Cortex-A9 with per-CPU
 * local timer and watchdog, GIC, SCU, PL310 L2 cache controller and
 * the CPU0 and CPU1 LPRM modules.
 * CPU0, CPU1 and MPUSS each have their own power domain and
 * hence multiple low power combinations of MPUSS are possible.
 *
 * CPU0 and CPU1 can't support Closed Switch Retention (CSWR)
 * because the mode is ruled out by the hardware constraints of
 * dormant mode: while waking up from dormant mode, a reset signal
 * to the Cortex-A9 processor must be asserted by the external
 * power controller.
 *
 * With architectural inputs and hardware recommendations, only the
 * modes below are supported, from a power gain vs. latency point of view:
 *
 *		CPU0		CPU1		MPUSS
 *	----------------------------------------------
 *	ON		ON		ON
 *	ON(Inactive)	OFF		ON(Inactive)
 *	OFF		OFF		CSWR
 *	OFF		OFF		OSWR
 *	OFF		OFF		OFF (Device OFF *TBD)
 *	----------------------------------------------
 *
 * Note: CPU0 is the master core. It is the last CPU to go down
 * and the first to wake up when MPUSS low power states are exercised.
 */

#include <linux/kernel.h>
#include <linux/io.h>
#include <linux/errno.h>
#include <linux/linkage.h>
#include <linux/smp.h>

#include <asm/cacheflush.h>
#include <asm/tlbflush.h>
#include <asm/smp_scu.h>
#include <asm/suspend.h>
#include <asm/virt.h>
#include <asm/hardware/cache-l2x0.h>

#include "soc.h"
#include "common.h"
#include "omap44xx.h"
#include "omap4-sar-layout.h"
#include "pm.h"
#include "prcm_mpu44xx.h"
#include "prcm_mpu54xx.h"
#include "prminst44xx.h"
#include "prcm44xx.h"
#include "prm44xx.h"
#include "prm-regbits-44xx.h"

static void __iomem *sar_base;
static u32 old_cpu1_ns_pa_addr;

#if defined(CONFIG_PM) && defined(CONFIG_SMP)

/**
 * struct omap4_cpu_pm_info - per-CPU data used for MPUSS low power handling
 * @pwrdm: CPU power domain
 * @scu_sar_addr: SAR location for the SCU power status
 * @wkup_sar_addr: SAR location for the CPU wakeup routine address
 * @l2x0_sar_addr: SAR location for the L2 cache save state
 */
struct omap4_cpu_pm_info {
        struct powerdomain *pwrdm;
        void __iomem *scu_sar_addr;
        void __iomem *wkup_sar_addr;
        void __iomem *l2x0_sar_addr;
};

/**
 * struct cpu_pm_ops - CPU pm operations
 * @finish_suspend: CPU suspend finisher function pointer
 * @resume: CPU resume function pointer
 * @scu_prepare: CPU Snoop Control Unit (SCU) programming function pointer
 * @hotplug_restart: CPU restart function pointer
 *
 * Structure holding function pointers for CPU low power operations such as
 * suspend, resume and SCU programming.
 */
struct cpu_pm_ops {
        int (*finish_suspend)(unsigned long cpu_state);
        void (*resume)(void);
        void (*scu_prepare)(unsigned int cpu_id, unsigned int cpu_state);
        void (*hotplug_restart)(void);
};

static DEFINE_PER_CPU(struct omap4_cpu_pm_info, omap4_pm_info);
static struct powerdomain *mpuss_pd;
static u32 cpu_context_offset;

static int default_finish_suspend(unsigned long cpu_state)
{
        omap_do_wfi();
        return 0;
}

static void dummy_cpu_resume(void)
{}

static void dummy_scu_prepare(unsigned int cpu_id, unsigned int cpu_state)
{}

static struct cpu_pm_ops omap_pm_ops = {
        .finish_suspend = default_finish_suspend,
        .resume = dummy_cpu_resume,
        .scu_prepare = dummy_scu_prepare,
        .hotplug_restart = dummy_cpu_resume,
};

/*
 * Program the wakeup routine address for CPU0 and CPU1, used for
 * OFF or DORMANT wakeup.
 */
static inline void set_cpu_wakeup_addr(unsigned int cpu_id, u32 addr)
{
        struct omap4_cpu_pm_info *pm_info = &per_cpu(omap4_pm_info, cpu_id);

        if (pm_info->wkup_sar_addr)
                writel_relaxed(addr, pm_info->wkup_sar_addr);
}

/*
 * Store the SCU power status value to scratchpad memory
 */
static void scu_pwrst_prepare(unsigned int cpu_id, unsigned int cpu_state)
{
        struct omap4_cpu_pm_info *pm_info = &per_cpu(omap4_pm_info, cpu_id);
        u32 scu_pwr_st;

        switch (cpu_state) {
        case PWRDM_POWER_RET:
                scu_pwr_st = SCU_PM_DORMANT;
                break;
        case PWRDM_POWER_OFF:
                scu_pwr_st = SCU_PM_POWEROFF;
                break;
        case PWRDM_POWER_ON:
        case PWRDM_POWER_INACTIVE:
        default:
                scu_pwr_st = SCU_PM_NORMAL;
                break;
        }

        if (pm_info->scu_sar_addr)
                writel_relaxed(scu_pwr_st, pm_info->scu_sar_addr);
}

/* Helper functions for MPUSS OSWR */
static inline void mpuss_clear_prev_logic_pwrst(void)
{
        u32 reg;

        reg = omap4_prminst_read_inst_reg(OMAP4430_PRM_PARTITION,
                OMAP4430_PRM_MPU_INST, OMAP4_RM_MPU_MPU_CONTEXT_OFFSET);
        omap4_prminst_write_inst_reg(reg, OMAP4430_PRM_PARTITION,
                OMAP4430_PRM_MPU_INST, OMAP4_RM_MPU_MPU_CONTEXT_OFFSET);
}

static inline void cpu_clear_prev_logic_pwrst(unsigned int cpu_id)
{
        u32 reg;

        if (cpu_id) {
                reg = omap4_prcm_mpu_read_inst_reg(OMAP4430_PRCM_MPU_CPU1_INST,
                                                   cpu_context_offset);
                omap4_prcm_mpu_write_inst_reg(reg, OMAP4430_PRCM_MPU_CPU1_INST,
                                              cpu_context_offset);
        } else {
                reg = omap4_prcm_mpu_read_inst_reg(OMAP4430_PRCM_MPU_CPU0_INST,
                                                   cpu_context_offset);
                omap4_prcm_mpu_write_inst_reg(reg, OMAP4430_PRCM_MPU_CPU0_INST,
                                              cpu_context_offset);
        }
}

/*
 * Store the CPU cluster state for L2X0 low power operations.
 */
static void l2x0_pwrst_prepare(unsigned int cpu_id, unsigned int save_state)
{
        struct omap4_cpu_pm_info *pm_info = &per_cpu(omap4_pm_info, cpu_id);

        if (pm_info->l2x0_sar_addr)
                writel_relaxed(save_state, pm_info->l2x0_sar_addr);
}

/*
 * Save the L2X0 AUXCTRL and POR values to SAR memory. They are used
 * in every MPUSS OFF restore path.
 */
#ifdef CONFIG_CACHE_L2X0
static void __init save_l2x0_context(void)
{
        void __iomem *l2x0_base = omap4_get_l2cache_base();

        if (l2x0_base && sar_base) {
                writel_relaxed(l2x0_saved_regs.aux_ctrl,
                               sar_base + L2X0_AUXCTRL_OFFSET);
                writel_relaxed(l2x0_saved_regs.prefetch_ctrl,
                               sar_base + L2X0_PREFETCH_CTRL_OFFSET);
        }
}
#else
static void __init save_l2x0_context(void)
{}
#endif

/**
 * omap4_enter_lowpower() - OMAP4 MPUSS Low Power Entry Function
 * @cpu: CPU ID
 * @power_state: Low power state.
 *
 * This function manages the low power programming of the OMAP4
 * MPUSS subsystem.
 *
 * MPUSS states for the context save:
 * save_state =
 *	0 - Nothing lost and no need to save: MPUSS INACTIVE
 *	1 - CPUx L1 and logic lost: MPUSS CSWR
 *	2 - CPUx L1 and logic lost + GIC lost: MPUSS OSWR
 *	3 - CPUx L1 and logic lost + GIC + L2 lost: DEVICE OFF
 */
int omap4_enter_lowpower(unsigned int cpu, unsigned int power_state)
{
        struct omap4_cpu_pm_info *pm_info = &per_cpu(omap4_pm_info, cpu);
        unsigned int save_state = 0, cpu_logic_state = PWRDM_POWER_RET;

        if (omap_rev() == OMAP4430_REV_ES1_0)
                return -ENXIO;

        switch (power_state) {
        case PWRDM_POWER_ON:
        case PWRDM_POWER_INACTIVE:
                save_state = 0;
                break;
        case PWRDM_POWER_OFF:
                cpu_logic_state = PWRDM_POWER_OFF;
                save_state = 1;
                break;
        case PWRDM_POWER_RET:
                if (IS_PM44XX_ERRATUM(PM_OMAP4_CPU_OSWR_DISABLE))
                        save_state = 0;
                break;
        default:
                /*
                 * CPUx CSWR is an invalid hardware state. CPUx OSWR
                 * doesn't make much sense either, since logic is lost
                 * and the L1 cache needs to be cleaned because of
                 * coherency. This makes CPUx OSWR equivalent to CPUx
                 * OFF and hence not supported.
                 */
                WARN_ON(1);
                return -ENXIO;
        }

        pwrdm_pre_transition(NULL);

        /*
         * Check the MPUSS next state and save the interrupt controller
         * if needed. In MPUSS OSWR or device OFF, the interrupt
         * controller context is lost.
         */
        mpuss_clear_prev_logic_pwrst();
        if ((pwrdm_read_next_pwrst(mpuss_pd) == PWRDM_POWER_RET) &&
            (pwrdm_read_logic_retst(mpuss_pd) == PWRDM_POWER_OFF))
                save_state = 2;

        cpu_clear_prev_logic_pwrst(cpu);
        pwrdm_set_next_pwrst(pm_info->pwrdm, power_state);
        pwrdm_set_logic_retst(pm_info->pwrdm, cpu_logic_state);
        set_cpu_wakeup_addr(cpu, __pa_symbol(omap_pm_ops.resume));
        omap_pm_ops.scu_prepare(cpu, power_state);
        l2x0_pwrst_prepare(cpu, save_state);

        /*
         * Call the low level function with the targeted low power state.
         */
        if (save_state)
                cpu_suspend(save_state, omap_pm_ops.finish_suspend);
        else
                omap_pm_ops.finish_suspend(save_state);

        if (IS_PM44XX_ERRATUM(PM_OMAP4_ROM_SMP_BOOT_ERRATUM_GICD) && cpu)
                gic_dist_enable();

        /*
         * Restore the CPUx power state to ON, otherwise the CPUx
         * power domain can transition to the programmed low power
         * state while doing WFI outside the low power code. On
         * secure devices, CPUx does WFI which can result in
         * domain transition.
         */
        pwrdm_set_next_pwrst(pm_info->pwrdm, PWRDM_POWER_ON);

        pwrdm_post_transition(NULL);

        return 0;
}
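
/*
 * Usage sketch (illustrative only, not part of the original code): the
 * OMAP4 cpuidle or system suspend code is expected to call
 * omap4_enter_lowpower() per CPU with the target CPU power state,
 * roughly:
 *
 *	ret = omap4_enter_lowpower(smp_processor_id(), PWRDM_POWER_OFF);
 *
 * The actual call sites live outside this file (e.g. the OMAP4 cpuidle
 * driver and the suspend path) and may differ in detail.
 */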

/**
 * omap4_hotplug_cpu() - OMAP4 CPU hotplug entry
 * @cpu: CPU ID
 * @power_state: CPU low power state.
 */
int omap4_hotplug_cpu(unsigned int cpu, unsigned int power_state)
{
        struct omap4_cpu_pm_info *pm_info = &per_cpu(omap4_pm_info, cpu);
        unsigned int cpu_state = 0;

        if (omap_rev() == OMAP4430_REV_ES1_0)
                return -ENXIO;

        /* Use the achievable power state for the domain */
        power_state = pwrdm_get_valid_lp_state(pm_info->pwrdm,
                                               false, power_state);

        if (power_state == PWRDM_POWER_OFF)
                cpu_state = 1;

        pwrdm_clear_all_prev_pwrst(pm_info->pwrdm);
        pwrdm_set_next_pwrst(pm_info->pwrdm, power_state);
        set_cpu_wakeup_addr(cpu, __pa_symbol(omap_pm_ops.hotplug_restart));
        omap_pm_ops.scu_prepare(cpu, power_state);

        /*
         * The CPU never returns if the targeted power state is OFF mode.
         * CPU ONLINE follows the normal CPU ONLINE path via
         * omap4_secondary_startup().
         */
        omap_pm_ops.finish_suspend(cpu_state);

        pwrdm_set_next_pwrst(pm_info->pwrdm, PWRDM_POWER_ON);
        return 0;
}
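
/*
 * Usage sketch (illustrative only): the OMAP4 SMP hotplug "die" path
 * is expected to park an offlined CPU through this helper, roughly:
 *
 *	omap4_hotplug_cpu(cpu, PWRDM_POWER_OFF);
 *
 * The exact caller (the platform CPU hotplug code) is outside this
 * file and may differ in detail.
 */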

/*
 * Enable Mercury Fast HG retention mode by default.
 */
static void enable_mercury_retention_mode(void)
{
        u32 reg;

        reg = omap4_prcm_mpu_read_inst_reg(OMAP54XX_PRCM_MPU_DEVICE_INST,
                                  OMAP54XX_PRCM_MPU_PRM_PSCON_COUNT_OFFSET);
        /* Enable HG_EN, HG_RAMPUP = fast mode */
        reg |= BIT(24) | BIT(25);
        omap4_prcm_mpu_write_inst_reg(reg, OMAP54XX_PRCM_MPU_DEVICE_INST,
                                  OMAP54XX_PRCM_MPU_PRM_PSCON_COUNT_OFFSET);
}

/*
 * Initialise the OMAP4 MPUSS
 */
int __init omap4_mpuss_init(void)
{
        struct omap4_cpu_pm_info *pm_info;

        if (omap_rev() == OMAP4430_REV_ES1_0) {
                WARN(1, "Power Management not supported on OMAP4430 ES1.0\n");
                return -ENODEV;
        }

        /* Initialise per-CPU PM information */
        pm_info = &per_cpu(omap4_pm_info, 0x0);
        if (sar_base) {
                pm_info->scu_sar_addr = sar_base + SCU_OFFSET0;
                if (cpu_is_omap44xx())
                        pm_info->wkup_sar_addr = sar_base +
                                CPU0_WAKEUP_NS_PA_ADDR_OFFSET;
                else
                        pm_info->wkup_sar_addr = sar_base +
                                OMAP5_CPU0_WAKEUP_NS_PA_ADDR_OFFSET;
                pm_info->l2x0_sar_addr = sar_base + L2X0_SAVE_OFFSET0;
        }
        pm_info->pwrdm = pwrdm_lookup("cpu0_pwrdm");
        if (!pm_info->pwrdm) {
                pr_err("Lookup failed for CPU0 pwrdm\n");
                return -ENODEV;
        }

        /* Clear CPU previous power domain state */
        pwrdm_clear_all_prev_pwrst(pm_info->pwrdm);
        cpu_clear_prev_logic_pwrst(0);

        /* Initialise CPU0 power domain state to ON */
        pwrdm_set_next_pwrst(pm_info->pwrdm, PWRDM_POWER_ON);

        pm_info = &per_cpu(omap4_pm_info, 0x1);
        if (sar_base) {
                pm_info->scu_sar_addr = sar_base + SCU_OFFSET1;
                if (cpu_is_omap44xx())
                        pm_info->wkup_sar_addr = sar_base +
                                CPU1_WAKEUP_NS_PA_ADDR_OFFSET;
                else
                        pm_info->wkup_sar_addr = sar_base +
                                OMAP5_CPU1_WAKEUP_NS_PA_ADDR_OFFSET;
                pm_info->l2x0_sar_addr = sar_base + L2X0_SAVE_OFFSET1;
        }

        pm_info->pwrdm = pwrdm_lookup("cpu1_pwrdm");
        if (!pm_info->pwrdm) {
                pr_err("Lookup failed for CPU1 pwrdm\n");
                return -ENODEV;
        }

        /* Clear CPU previous power domain state */
        pwrdm_clear_all_prev_pwrst(pm_info->pwrdm);
        cpu_clear_prev_logic_pwrst(1);

        /* Initialise CPU1 power domain state to ON */
        pwrdm_set_next_pwrst(pm_info->pwrdm, PWRDM_POWER_ON);

        mpuss_pd = pwrdm_lookup("mpu_pwrdm");
        if (!mpuss_pd) {
                pr_err("Failed to lookup MPUSS power domain\n");
                return -ENODEV;
        }
        pwrdm_clear_all_prev_pwrst(mpuss_pd);
        mpuss_clear_prev_logic_pwrst();

        if (sar_base) {
                /* Save the device type on scratchpad for low level code to use */
                writel_relaxed((omap_type() != OMAP2_DEVICE_TYPE_GP) ? 1 : 0,
                               sar_base + OMAP_TYPE_OFFSET);
                save_l2x0_context();
        }

        if (cpu_is_omap44xx()) {
                omap_pm_ops.finish_suspend = omap4_finish_suspend;
                omap_pm_ops.resume = omap4_cpu_resume;
                omap_pm_ops.scu_prepare = scu_pwrst_prepare;
                omap_pm_ops.hotplug_restart = omap4_secondary_startup;
                cpu_context_offset = OMAP4_RM_CPU0_CPU0_CONTEXT_OFFSET;
        } else if (soc_is_omap54xx() || soc_is_dra7xx()) {
                cpu_context_offset = OMAP54XX_RM_CPU0_CPU0_CONTEXT_OFFSET;
                enable_mercury_retention_mode();
        }

        if (cpu_is_omap446x())
                omap_pm_ops.hotplug_restart = omap4460_secondary_startup;

        return 0;
}

#endif

u32 omap4_get_cpu1_ns_pa_addr(void)
{
        return old_cpu1_ns_pa_addr;
}

/*
 * For kexec, we must set CPU1_WAKEUP_NS_PA_ADDR to point to the
 * current kernel's secondary_startup() early before
 * clockdomains_init(). Otherwise clockdomain_init() can
 * wake CPU1 and cause a hang.
 */
void __init omap4_mpuss_early_init(void)
{
        unsigned long startup_pa;
        void __iomem *ns_pa_addr;

        if (!(soc_is_omap44xx() || soc_is_omap54xx()))
                return;

        sar_base = omap4_get_sar_ram_base();

        /* Save the old NS_PA_ADDR for validity checks later on */
        if (soc_is_omap44xx())
                ns_pa_addr = sar_base + CPU1_WAKEUP_NS_PA_ADDR_OFFSET;
        else
                ns_pa_addr = sar_base + OMAP5_CPU1_WAKEUP_NS_PA_ADDR_OFFSET;
        old_cpu1_ns_pa_addr = readl_relaxed(ns_pa_addr);

        if (soc_is_omap443x())
                startup_pa = __pa_symbol(omap4_secondary_startup);
        else if (soc_is_omap446x())
                startup_pa = __pa_symbol(omap4460_secondary_startup);
        else if ((__boot_cpu_mode & MODE_MASK) == HYP_MODE)
                startup_pa = __pa_symbol(omap5_secondary_hyp_startup);
        else
                startup_pa = __pa_symbol(omap5_secondary_startup);

        if (soc_is_omap44xx())
                writel_relaxed(startup_pa, sar_base +
                               CPU1_WAKEUP_NS_PA_ADDR_OFFSET);
        else
                writel_relaxed(startup_pa, sar_base +
                               OMAP5_CPU1_WAKEUP_NS_PA_ADDR_OFFSET);
}
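
/*
 * Usage sketch (illustrative, hedged): during SMP bring-up after a
 * kexec, the address saved above can be compared against the expected
 * secondary startup address to decide whether CPU1 was left pointing
 * at the previous kernel and needs a reset, roughly:
 *
 *	old = omap4_get_cpu1_ns_pa_addr();
 *	if (old && old != __pa_symbol(omap4_secondary_startup))
 *		needs_reset = true;
 *
 * The real check lives in the OMAP4 SMP boot code outside this file
 * and is more involved.
 */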