// SPDX-License-Identifier: GPL-2.0
/*
 * AM33XX Power Management Routines
 *
 * Copyright (C) 2012-2018 Texas Instruments Incorporated - http://www.ti.com/
 * Vaibhav Bedia, Dave Gerlach
 */

#include <linux/clk.h>
#include <linux/cpu.h>
#include <linux/err.h>
#include <linux/genalloc.h>
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/io.h>
#include <linux/module.h>
#include <linux/nvmem-consumer.h>
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/platform_data/pm33xx.h>
#include <linux/platform_device.h>
#include <linux/rtc.h>
#include <linux/rtc/rtc-omap.h>
#include <linux/sizes.h>
#include <linux/sram.h>
#include <linux/suspend.h>
#include <linux/ti-emif-sram.h>
#include <linux/wkup_m3_ipc.h>

#include <asm/proc-fns.h>
#include <asm/suspend.h>
#include <asm/system_misc.h>

#define AMX3_PM_SRAM_SYMBOL_OFFSET(sym) ((unsigned long)(sym) - \
                                         (unsigned long)pm_sram->do_wfi)

#define RTC_SCRATCH_RESUME_REG 0
#define RTC_SCRATCH_MAGIC_REG 1
#define RTC_REG_BOOT_MAGIC 0x8cd0 /* RTC */
#define GIC_INT_SET_PENDING_BASE 0x200
#define AM43XX_GIC_DIST_BASE 0x48241000

static void __iomem *rtc_base_virt;
static struct clk *rtc_fck;
static u32 rtc_magic_val;

static int (*am33xx_do_wfi_sram)(unsigned long unused);
static phys_addr_t am33xx_do_wfi_sram_phys;

static struct gen_pool *sram_pool, *sram_pool_data;
static unsigned long ocmcram_location, ocmcram_location_data;

static struct rtc_device *omap_rtc;
static void __iomem *gic_dist_base;

static struct am33xx_pm_platform_data *pm_ops;
static struct am33xx_pm_sram_addr *pm_sram;

static struct device *pm33xx_dev;
static struct wkup_m3_ipc *m3_ipc;

#ifdef CONFIG_SUSPEND
static int rtc_only_idle;
static int retrigger_irq;
static unsigned long suspend_wfi_flags;

static struct wkup_m3_wakeup_src wakeup_src = {.irq_nr = 0,
        .src = "Unknown",
};

static struct wkup_m3_wakeup_src rtc_alarm_wakeup = {
        .irq_nr = 108, .src = "RTC Alarm",
};

static struct wkup_m3_wakeup_src rtc_ext_wakeup = {
        .irq_nr = 0, .src = "Ext wakeup",
};
#endif

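/*
 * Translate the link-time address of a symbol within the SRAM suspend
 * code into its run-time address inside the copy made by
 * am33xx_push_sram_idle().
 */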
static u32 sram_suspend_address(unsigned long addr)
{
        return ((unsigned long)am33xx_do_wfi_sram +
                AMX3_PM_SRAM_SYMBOL_OFFSET(addr));
}

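/*
 * Copy the do_wfi suspend code, the EMIF PM function table and the
 * read-only data they need into OCMC SRAM so they can run while DDR
 * is in self-refresh.
 */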
static int am33xx_push_sram_idle(void)
{
        struct am33xx_pm_ro_sram_data ro_sram_data;
        int ret;
        u32 table_addr, ro_data_addr;
        void *copy_addr;

        ro_sram_data.amx3_pm_sram_data_virt = ocmcram_location_data;
        ro_sram_data.amx3_pm_sram_data_phys =
                gen_pool_virt_to_phys(sram_pool_data, ocmcram_location_data);
        ro_sram_data.rtc_base_virt = rtc_base_virt;

        /* Save physical address to calculate resume offset during pm init */
        am33xx_do_wfi_sram_phys = gen_pool_virt_to_phys(sram_pool,
                                                        ocmcram_location);

        am33xx_do_wfi_sram = sram_exec_copy(sram_pool, (void *)ocmcram_location,
                                            pm_sram->do_wfi,
                                            *pm_sram->do_wfi_sz);
        if (!am33xx_do_wfi_sram) {
                dev_err(pm33xx_dev,
                        "PM: %s: am33xx_do_wfi copy to sram failed\n",
                        __func__);
                return -ENODEV;
        }

        table_addr =
                sram_suspend_address((unsigned long)pm_sram->emif_sram_table);
        ret = ti_emif_copy_pm_function_table(sram_pool, (void *)table_addr);
        if (ret) {
                dev_dbg(pm33xx_dev,
                        "PM: %s: EMIF function copy failed\n", __func__);
                return -EPROBE_DEFER;
        }

        ro_data_addr =
                sram_suspend_address((unsigned long)pm_sram->ro_sram_data);
        copy_addr = sram_exec_copy(sram_pool, (void *)ro_data_addr,
                                   &ro_sram_data,
                                   sizeof(ro_sram_data));
        if (!copy_addr) {
                dev_err(pm33xx_dev,
                        "PM: %s: ro_sram_data copy to sram failed\n",
                        __func__);
                return -ENODEV;
        }

        return 0;
}

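/*
 * SoC idle entry point handed to the platform PM code: optionally ask the
 * wkup_m3 to prepare for idle, then jump to the do_wfi code in SRAM.
 */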
static int am33xx_do_sram_idle(u32 wfi_flags)
{
        int ret = 0;

        if (!m3_ipc || !pm_ops)
                return 0;

        if (wfi_flags & WFI_FLAG_WAKE_M3)
                ret = m3_ipc->ops->prepare_low_power(m3_ipc, WKUP_M3_IDLE);

        return pm_ops->cpu_suspend(am33xx_do_wfi_sram, wfi_flags);
}

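/* Map the AM43xx GIC distributor so a pending wakeup IRQ can be retriggered */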
static int __init am43xx_map_gic(void)
{
        gic_dist_base = ioremap(AM43XX_GIC_DIST_BASE, SZ_4K);

        if (!gic_dist_base)
                return -ENOMEM;

        return 0;
}

#ifdef CONFIG_SUSPEND
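/*
 * Report what woke us from rtc-only mode: the RTC alarm if the alarm bit
 * in the RTC status register is set, otherwise the external wakeup pin.
 * Also record which IRQ must be retriggered on resume.
 */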
static struct wkup_m3_wakeup_src rtc_wake_src(void)
{
        u32 i;

        i = __raw_readl(rtc_base_virt + 0x44) & 0x40;

        if (i) {
                retrigger_irq = rtc_alarm_wakeup.irq_nr;
                return rtc_alarm_wakeup;
        }

        retrigger_irq = rtc_ext_wakeup.irq_nr;

        return rtc_ext_wakeup;
}

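/* Program the RTC power-off sequence, then run the SRAM wfi code */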
static int am33xx_rtc_only_idle(unsigned long wfi_flags)
{
        omap_rtc_power_off_program(&omap_rtc->dev);
        am33xx_do_wfi_sram(wfi_flags);
        return 0;
}

/*
 * Note that the RTC module clock must be re-enabled only for rtc+ddr suspend.
 * The module appears to stay in the SYSC_IDLE_SMART_WKUP mode configured by
 * the interconnect code just fine for both rtc+ddr suspend and retention
 * suspend.
 */
static int am33xx_pm_suspend(suspend_state_t suspend_state)
{
        int i, ret = 0;

        if (suspend_state == PM_SUSPEND_MEM &&
            pm_ops->check_off_mode_enable()) {
                ret = clk_prepare_enable(rtc_fck);
                if (ret) {
                        dev_err(pm33xx_dev, "Failed to enable clock: %i\n", ret);
                        return ret;
                }

                pm_ops->save_context();
                suspend_wfi_flags |= WFI_FLAG_RTC_ONLY;
                clk_save_context();
                ret = pm_ops->soc_suspend(suspend_state, am33xx_rtc_only_idle,
                                          suspend_wfi_flags);

                suspend_wfi_flags &= ~WFI_FLAG_RTC_ONLY;
                dev_info(pm33xx_dev, "Entering RTC Only mode with DDR in self-refresh\n");

                if (!ret) {
                        clk_restore_context();
                        pm_ops->restore_context();
                        m3_ipc->ops->set_rtc_only(m3_ipc);
                        am33xx_push_sram_idle();
                }
        } else {
                ret = pm_ops->soc_suspend(suspend_state, am33xx_do_wfi_sram,
                                          suspend_wfi_flags);
        }

        if (ret) {
                dev_err(pm33xx_dev, "PM: Kernel suspend failure\n");
        } else {
                i = m3_ipc->ops->request_pm_status(m3_ipc);

                switch (i) {
                case 0:
                        dev_info(pm33xx_dev,
                                 "PM: Successfully put all powerdomains to target state\n");
                        break;
                case 1:
                        dev_err(pm33xx_dev,
                                "PM: Could not transition all powerdomains to target state\n");
                        ret = -1;
                        break;
                default:
                        dev_err(pm33xx_dev,
                                "PM: CM3 returned unknown result = %d\n", i);
                        ret = -1;
                }

                /* print the wakeup reason */
                if (rtc_only_idle) {
                        wakeup_src = rtc_wake_src();
                        pr_info("PM: Wakeup source %s\n", wakeup_src.src);
                } else {
                        pr_info("PM: Wakeup source %s\n",
                                m3_ipc->ops->request_wake_src(m3_ipc));
                }
        }

        if (suspend_state == PM_SUSPEND_MEM && pm_ops->check_off_mode_enable())
                clk_disable_unprepare(rtc_fck);

        return ret;
}

static int am33xx_pm_enter(suspend_state_t suspend_state)
{
        int ret = 0;

        switch (suspend_state) {
        case PM_SUSPEND_MEM:
        case PM_SUSPEND_STANDBY:
                ret = am33xx_pm_suspend(suspend_state);
                break;
        default:
                ret = -EINVAL;
        }

        return ret;
}

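/*
 * Prepare the suspend transition: for mem suspend with off mode enabled,
 * write the saved boot magic back to the RTC scratch register (so an
 * rtc-only resume can be detected) and flag rtc-only idle, then ask the
 * wkup_m3 to prepare the matching low-power state.
 */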
static int am33xx_pm_begin(suspend_state_t state)
{
        int ret = -EINVAL;
        struct nvmem_device *nvmem;

        if (state == PM_SUSPEND_MEM && pm_ops->check_off_mode_enable()) {
                nvmem = devm_nvmem_device_get(&omap_rtc->dev,
                                              "omap_rtc_scratch0");
                if (!IS_ERR(nvmem))
                        nvmem_device_write(nvmem, RTC_SCRATCH_MAGIC_REG * 4, 4,
                                           (void *)&rtc_magic_val);
                rtc_only_idle = 1;
        } else {
                rtc_only_idle = 0;
        }

        pm_ops->begin_suspend();

        switch (state) {
        case PM_SUSPEND_MEM:
                ret = m3_ipc->ops->prepare_low_power(m3_ipc, WKUP_M3_DEEPSLEEP);
                break;
        case PM_SUSPEND_STANDBY:
                ret = m3_ipc->ops->prepare_low_power(m3_ipc, WKUP_M3_STANDBY);
                break;
        }

        return ret;
}

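/*
 * Finish the suspend transition: let the wkup_m3 exit low power and, after
 * an rtc-only cycle, retrigger the wakeup IRQ in the GIC and clear the RTC
 * scratch magic again.
 */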
static void am33xx_pm_end(void)
{
        u32 val = 0;
        struct nvmem_device *nvmem;

        nvmem = devm_nvmem_device_get(&omap_rtc->dev, "omap_rtc_scratch0");
        if (IS_ERR(nvmem))
                return;

        m3_ipc->ops->finish_low_power(m3_ipc);
        if (rtc_only_idle) {
                if (retrigger_irq) {
                        /*
                         * Each 32-bit Interrupt Set-Pending register covers
                         * 32 interrupts. Compute the bit offset of the
                         * interrupt within its register and set that bit;
                         * compute the register offset by dividing the
                         * interrupt number by 32 and multiplying by 4.
                         */
                        writel_relaxed(1 << (retrigger_irq & 31),
                                       gic_dist_base + GIC_INT_SET_PENDING_BASE
                                       + retrigger_irq / 32 * 4);
                }

                nvmem_device_write(nvmem, RTC_SCRATCH_MAGIC_REG * 4, 4,
                                   (void *)&val);
        }

        rtc_only_idle = 0;

        pm_ops->finish_suspend();
}

static int am33xx_pm_valid(suspend_state_t state)
{
        switch (state) {
        case PM_SUSPEND_STANDBY:
        case PM_SUSPEND_MEM:
                return 1;
        default:
                return 0;
        }
}

static const struct platform_suspend_ops am33xx_pm_ops = {
        .begin = am33xx_pm_begin,
        .end = am33xx_pm_end,
        .enter = am33xx_pm_enter,
        .valid = am33xx_pm_valid,
};
#endif /* CONFIG_SUSPEND */

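/*
 * Tell the wkup_m3 the DDR memory type and the physical address the ROM
 * code should jump to on resume (the resume offset inside the SRAM copy
 * of the suspend code).
 */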
static void am33xx_pm_set_ipc_ops(void)
{
        u32 resume_address;
        int temp;

        temp = ti_emif_get_mem_type();
        if (temp < 0) {
                dev_err(pm33xx_dev, "PM: Cannot determine memory type, no PM available\n");
                return;
        }
        m3_ipc->ops->set_mem_type(m3_ipc, temp);

        /* Physical resume address to be used by ROM code */
        resume_address = am33xx_do_wfi_sram_phys +
                         *pm_sram->resume_offset + 0x4;

        m3_ipc->ops->set_resume_address(m3_ipc, (void *)resume_address);
}

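/* Release the OCMC SRAM regions allocated for the suspend code and data */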
static void am33xx_pm_free_sram(void)
{
        gen_pool_free(sram_pool, ocmcram_location, *pm_sram->do_wfi_sz);
        gen_pool_free(sram_pool_data, ocmcram_location_data,
                      sizeof(struct am33xx_pm_ro_sram_data));
}

/*
 * Reserve space in OCMC SRAM for the minimal suspend-resume code and its data
 */
static int am33xx_pm_alloc_sram(void)
{
        struct device_node *np;
        int ret = 0;

        np = of_find_compatible_node(NULL, NULL, "ti,omap3-mpu");
        if (!np) {
                np = of_find_compatible_node(NULL, NULL, "ti,omap4-mpu");
                if (!np) {
                        dev_err(pm33xx_dev, "PM: %s: Unable to find device node for mpu\n",
                                __func__);
                        return -ENODEV;
                }
        }

        sram_pool = of_gen_pool_get(np, "pm-sram", 0);
        if (!sram_pool) {
                dev_err(pm33xx_dev, "PM: %s: Unable to get sram pool for ocmcram\n",
                        __func__);
                ret = -ENODEV;
                goto mpu_put_node;
        }

        sram_pool_data = of_gen_pool_get(np, "pm-sram", 1);
        if (!sram_pool_data) {
                dev_err(pm33xx_dev, "PM: %s: Unable to get sram data pool for ocmcram\n",
                        __func__);
                ret = -ENODEV;
                goto mpu_put_node;
        }

        ocmcram_location = gen_pool_alloc(sram_pool, *pm_sram->do_wfi_sz);
        if (!ocmcram_location) {
                dev_err(pm33xx_dev, "PM: %s: Unable to allocate memory from ocmcram\n",
                        __func__);
                ret = -ENOMEM;
                goto mpu_put_node;
        }

        ocmcram_location_data = gen_pool_alloc(sram_pool_data,
                                               sizeof(struct emif_regs_amx3));
        if (!ocmcram_location_data) {
                dev_err(pm33xx_dev, "PM: Unable to allocate memory from ocmcram\n");
                gen_pool_free(sram_pool, ocmcram_location, *pm_sram->do_wfi_sz);
                ret = -ENOMEM;
        }

mpu_put_node:
        of_node_put(np);
        return ret;
}

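/*
 * Find the RTC, take its module clock and register mapping, check the
 * scratch register for the bootloader's rtc-only boot magic, then clear
 * the magic and store the resume address in the RTC scratch area.
 */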
static int am33xx_pm_rtc_setup(void)
{
        struct device_node *np;
        unsigned long val = 0;
        struct nvmem_device *nvmem;
        int error;

        np = of_find_node_by_name(NULL, "rtc");

        if (of_device_is_available(np)) {
                /* RTC interconnect target module clock */
                rtc_fck = of_clk_get_by_name(np->parent, "fck");
                if (IS_ERR(rtc_fck))
                        return PTR_ERR(rtc_fck);

                rtc_base_virt = of_iomap(np, 0);
                if (!rtc_base_virt) {
                        pr_warn("PM: could not iomap rtc\n");
                        error = -ENODEV;
                        goto err_clk_put;
                }

                omap_rtc = rtc_class_open("rtc0");
                if (!omap_rtc) {
                        pr_warn("PM: rtc0 not available\n");
                        error = -EPROBE_DEFER;
                        goto err_iounmap;
                }

                nvmem = devm_nvmem_device_get(&omap_rtc->dev,
                                              "omap_rtc_scratch0");
                if (!IS_ERR(nvmem)) {
                        nvmem_device_read(nvmem, RTC_SCRATCH_MAGIC_REG * 4,
                                          4, (void *)&rtc_magic_val);
                        if ((rtc_magic_val & 0xffff) != RTC_REG_BOOT_MAGIC)
                                pr_warn("PM: bootloader does not support rtc-only!\n");

                        nvmem_device_write(nvmem, RTC_SCRATCH_MAGIC_REG * 4,
                                           4, (void *)&val);
                        val = pm_sram->resume_address;
                        nvmem_device_write(nvmem, RTC_SCRATCH_RESUME_REG * 4,
                                           4, (void *)&val);
                }
        } else {
                pr_warn("PM: no rtc available, rtc-only mode disabled.\n");
        }

        return 0;

err_iounmap:
        iounmap(rtc_base_virt);
err_clk_put:
        clk_put(rtc_fck);

        return error;
}

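/*
 * Probe: requires the SoC PM platform data, the wkup_m3 IPC handle and the
 * SRAM/RTC resources; copies the suspend code to SRAM, registers the
 * suspend ops and hands the SRAM idle entry point to the core PM code.
 */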
static int am33xx_pm_probe(struct platform_device *pdev)
{
        struct device *dev = &pdev->dev;
        int ret;

        if (!of_machine_is_compatible("ti,am33xx") &&
            !of_machine_is_compatible("ti,am43"))
                return -ENODEV;

        pm_ops = dev->platform_data;
        if (!pm_ops) {
                dev_err(dev, "PM: Cannot get core PM ops!\n");
                return -ENODEV;
        }

        ret = am43xx_map_gic();
        if (ret) {
                pr_err("PM: Could not ioremap GIC base\n");
                return ret;
        }

        pm_sram = pm_ops->get_sram_addrs();
        if (!pm_sram) {
                dev_err(dev, "PM: Cannot get PM asm function addresses!!\n");
                return -ENODEV;
        }

        m3_ipc = wkup_m3_ipc_get();
        if (!m3_ipc) {
                pr_err("PM: Cannot get wkup_m3_ipc handle\n");
                return -EPROBE_DEFER;
        }

        pm33xx_dev = dev;

        ret = am33xx_pm_alloc_sram();
        if (ret)
                return ret;

        ret = am33xx_pm_rtc_setup();
        if (ret)
                goto err_free_sram;

        ret = am33xx_push_sram_idle();
        if (ret)
                goto err_unsetup_rtc;

        am33xx_pm_set_ipc_ops();

#ifdef CONFIG_SUSPEND
        suspend_set_ops(&am33xx_pm_ops);

        /*
         * For a system suspend we must flush the caches, put the DDR in
         * self-refresh, save the EMIF context, and have the wkup_m3 handle
         * the low-power transition.
         */
        suspend_wfi_flags |= WFI_FLAG_FLUSH_CACHE;
        suspend_wfi_flags |= WFI_FLAG_SELF_REFRESH;
        suspend_wfi_flags |= WFI_FLAG_SAVE_EMIF;
        suspend_wfi_flags |= WFI_FLAG_WAKE_M3;
#endif /* CONFIG_SUSPEND */

        ret = pm_ops->init(am33xx_do_sram_idle);
        if (ret) {
                dev_err(dev, "Unable to call core pm init!\n");
                ret = -ENODEV;
                goto err_put_wkup_m3_ipc;
        }

        return 0;

err_put_wkup_m3_ipc:
        wkup_m3_ipc_put(m3_ipc);
err_unsetup_rtc:
        iounmap(rtc_base_virt);
        clk_put(rtc_fck);
err_free_sram:
        am33xx_pm_free_sram();
        pm33xx_dev = NULL;
        return ret;
}

static int am33xx_pm_remove(struct platform_device *pdev)
{
        if (pm_ops->deinit)
                pm_ops->deinit();
        suspend_set_ops(NULL);
        wkup_m3_ipc_put(m3_ipc);
        am33xx_pm_free_sram();
        iounmap(rtc_base_virt);
        clk_put(rtc_fck);
        return 0;
}

static struct platform_driver am33xx_pm_driver = {
        .driver = {
                .name = "pm33xx",
        },
        .probe = am33xx_pm_probe,
        .remove = am33xx_pm_remove,
};
module_platform_driver(am33xx_pm_driver);

MODULE_ALIAS("platform:pm33xx");
MODULE_LICENSE("GPL v2");
MODULE_DESCRIPTION("am33xx power management driver");