/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
 * (C) Copyright 2007
 * Texas Instruments
 * Karthik Dasu <karthik-dp@ti.com>
 *
 * (C) Copyright 2004
 * Texas Instruments, <www.ti.com>
 * Richard Woodruff <r-woodruff2@ti.com>
 */
#include <linux/linkage.h>

#include <asm/assembler.h>

#include "omap34xx.h"
#include "iomap.h"
#include "cm3xxx.h"
#include "prm3xxx.h"
#include "sdrc.h"
#include "sram.h"
#include "control.h"

/*
 * Register access definitions
 */
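/*
 * Naming convention note: definitions with a _V suffix expand to virtual
 * addresses (the *_REGADDR macros), usable while the MMU is on; those with
 * a _P suffix are physical addresses, used by the restore code that runs
 * with the MMU off.
 */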
#define SDRC_SCRATCHPAD_SEM_OFFS	0xc
#define SDRC_SCRATCHPAD_SEM_V	OMAP343X_SCRATCHPAD_REGADDR\
				(SDRC_SCRATCHPAD_SEM_OFFS)
#define PM_PREPWSTST_CORE_P	OMAP3430_PRM_BASE + CORE_MOD +\
				OMAP3430_PM_PREPWSTST
#define PM_PWSTCTRL_MPU_P	OMAP3430_PRM_BASE + MPU_MOD + OMAP2_PM_PWSTCTRL
#define CM_IDLEST1_CORE_V	OMAP34XX_CM_REGADDR(CORE_MOD, CM_IDLEST1)
#define CM_IDLEST_CKGEN_V	OMAP34XX_CM_REGADDR(PLL_MOD, CM_IDLEST)
#define SRAM_BASE_P		OMAP3_SRAM_PA
#define CONTROL_STAT		OMAP343X_CTRL_BASE + OMAP343X_CONTROL_STATUS
#define CONTROL_MEM_RTA_CTRL	(OMAP343X_CTRL_BASE +\
				OMAP36XX_CONTROL_MEM_RTA_CTRL)

/* Move this once a more suitable place is available */
#define SCRATCHPAD_MEM_OFFS	0x310
#define SCRATCHPAD_BASE_P	(OMAP343X_CTRL_BASE +\
				OMAP343X_CONTROL_MEM_WKUP +\
				SCRATCHPAD_MEM_OFFS)
#define SDRC_POWER_V		OMAP34XX_SDRC_REGADDR(SDRC_POWER)
#define SDRC_SYSCONFIG_P	(OMAP343X_SDRC_BASE + SDRC_SYSCONFIG)
#define SDRC_MR_0_P		(OMAP343X_SDRC_BASE + SDRC_MR_0)
#define SDRC_EMR2_0_P		(OMAP343X_SDRC_BASE + SDRC_EMR2_0)
#define SDRC_MANUAL_0_P		(OMAP343X_SDRC_BASE + SDRC_MANUAL_0)
#define SDRC_MR_1_P		(OMAP343X_SDRC_BASE + SDRC_MR_1)
#define SDRC_EMR2_1_P		(OMAP343X_SDRC_BASE + SDRC_EMR2_1)
#define SDRC_MANUAL_1_P		(OMAP343X_SDRC_BASE + SDRC_MANUAL_1)
#define SDRC_DLLA_STATUS_V	OMAP34XX_SDRC_REGADDR(SDRC_DLLA_STATUS)
#define SDRC_DLLA_CTRL_V	OMAP34XX_SDRC_REGADDR(SDRC_DLLA_CTRL)

/*
 * This file needs to be built unconditionally as ARM to interoperate correctly
 * with non-Thumb-2-capable firmware.
 */
	.arm

/*
 * API functions
 */

	.text
/*
 * L2 cache needs to be toggled for stable OFF mode functionality on 3630.
 * This function sets up a flag that will allow for this toggling to take
 * place on 3630. Hopefully some version in the future may not need this.
 */
ENTRY(enable_omap3630_toggle_l2_on_restore)
	stmfd	sp!, {lr}	@ save registers on stack
	/* Setup so that we will disable and enable l2 */
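	/*
	 * l2dis_3630_offset holds the offset from its own location to the
	 * l2dis_3630 word in .data, so the flag can be written without
	 * relying on an absolute (link-time) address.
	 */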
	mov	r1, #0x1
	adr	r3, l2dis_3630_offset
	ldr	r2, [r3]		@ value for offset
	str	r1, [r2, r3]		@ write to l2dis_3630
	ldmfd	sp!, {pc}	@ restore regs and return
ENDPROC(enable_omap3630_toggle_l2_on_restore)

/*
 * Function to call ROM code to save the secure RAM context.
 *
 * r0 = physical address of the parameters
 */
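/*
 * The secure service is invoked through the SMI monitor: r12 carries the
 * PPA service ID (25), r1 the ROM code task ID, r2/r6 flag values and r3
 * the physical address of the parameter block, as set up below.
 */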
	.arch armv7-a
	.arch_extension sec
ENTRY(save_secure_ram_context)
	stmfd	sp!, {r4 - r11, lr}	@ save registers on stack
	mov	r3, r0			@ physical address of parameters
	mov	r0, #25			@ set service ID for PPA
	mov	r12, r0			@ copy secure service ID in r12
	mov	r1, #0			@ set task id for ROM code in r1
	mov	r2, #4			@ set some flags in r2, r6
	mov	r6, #0xff
	dsb				@ data write barrier
	dmb				@ data memory barrier
	smc	#1			@ call SMI monitor (smi #1)
	nop
	nop
	nop
	nop
	ldmfd	sp!, {r4 - r11, pc}
ENDPROC(save_secure_ram_context)

/*
 * ======================
 * == Idle entry point ==
 * ======================
 */

/*
 * Forces OMAP into idle state
 *
 * omap34xx_cpu_suspend() - This bit of code saves the CPU context if needed
 * and executes the WFI instruction. Calling WFI effectively changes the
 * power domain states to the desired target power states.
 *
 * Notes:
 * - only the minimum set of functions gets copied to internal SRAM at boot
 *   and after wake-up from OFF mode, cf. omap_push_sram_idle. The function
 *   pointers in SDRAM or SRAM are called depending on the desired low power
 *   target state.
 * - when the OMAP wakes up it continues at different execution points
 *   depending on the low power mode (non-OFF vs OFF modes),
 *   cf. 'Resume path for xxx mode' comments.
 */
	.align	3
ENTRY(omap34xx_cpu_suspend)
	stmfd	sp!, {r4 - r11, lr}	@ save registers on stack

	/*
	 * r0 contains information about saving context:
	 *   0 - No context lost
	 *   1 - Only L1 and logic lost
	 *   2 - Only L2 lost (even if L1 is retained, it is cleaned along with L2)
	 *   3 - Both L1 and L2 lost, along with logic
	 */

	/*
	 * For OFF mode: save context and jump to WFI in SDRAM (omap3_do_wfi)
	 * For non-OFF modes: jump to the WFI code in SRAM (omap3_do_wfi_sram)
	 */
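	/*
	 * omap3_do_wfi_sram is a pointer, set up by omap_push_sram_idle,
	 * to the copy of the WFI code in internal SRAM;
	 * omap3_do_wfi_sram_addr below is the literal holding its address.
	 */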
	ldr	r4, omap3_do_wfi_sram_addr
	ldr	r5, [r4]
	cmp	r0, #0x0	@ If no context save required,
	bxeq	r5		@  jump to the WFI code in SRAM


	/* Otherwise fall through to the save context code */
save_context_wfi:
	/*
	 * Jump out to the kernel flush routine
	 *  - reusing that code is better
	 *  - it executes in a cached space so is faster than refetching per block
	 *  - it should be faster and will track changes in the kernel
	 *  - we 'might' have to copy the address, load it and jump to it
	 * Flush all data from the L1 data cache before disabling the
	 * SCTLR.C bit.
	 */
	ldr	r1, kernel_flush
	mov	lr, pc
	bx	r1

	/*
	 * Clear the SCTLR.C bit to prevent further data cache
	 * allocation. Clearing SCTLR.C would make all the data accesses
	 * strongly ordered and would not hit the cache.
	 */
	mrc	p15, 0, r0, c1, c0, 0
	bic	r0, r0, #(1 << 2)	@ Disable the C bit
	mcr	p15, 0, r0, c1, c0, 0
	isb

	/*
	 * Invalidate the L1 data cache. Even though only an invalidate is
	 * necessary, the exported flush API is used here. Doing a clean
	 * on an already clean cache is almost a NOP.
	 */
	ldr	r1, kernel_flush
	blx	r1
	b	omap3_do_wfi
ENDPROC(omap34xx_cpu_suspend)
omap3_do_wfi_sram_addr:
	.word	omap3_do_wfi_sram
kernel_flush:
	.word	v7_flush_dcache_all

/* ===================================
 * == WFI instruction => Enter idle ==
 * ===================================
 */

/*
 * Do WFI instruction
 * Includes the resume path for non-OFF modes
 *
 * This code gets copied to internal SRAM and is accessible
 * from both SDRAM and SRAM:
 * - executed from SRAM for non-OFF modes (omap3_do_wfi_sram),
 * - executed from SDRAM for OFF mode (omap3_do_wfi).
 */
	.align	3
ENTRY(omap3_do_wfi)
	ldr	r4, sdrc_power		@ read the SDRC_POWER register
	ldr	r5, [r4]		@ read the contents of SDRC_POWER
	orr	r5, r5, #0x40		@ enable self refresh on idle req
	str	r5, [r4]		@ write back to SDRC_POWER register

	/* Data memory barrier and Data sync barrier */
	dsb
	dmb

/*
 * ===================================
 * == WFI instruction => Enter idle ==
 * ===================================
 */
	wfi				@ wait for interrupt

/*
 * ===================================
 * == Resume path for non-OFF modes ==
 * ===================================
 */
	nop
	nop
	nop
	nop
	nop
	nop
	nop
	nop
	nop
	nop

/*
 * This code sequence implements the erratum ID i581 WA:
 * SDRC state restore before accessing the SDRAM
 *
 * Only used when returning from non-OFF mode. For OFF
 * mode the ROM code configures the SDRC and
 * the DPLL before calling the restore code directly
 * from DDR.
 */

/* Make sure SDRC accesses are ok */
wait_sdrc_ok:

/* DPLL3 must be locked before accessing the SDRC. Maybe the HW ensures this */
	ldr	r4, cm_idlest_ckgen
wait_dpll3_lock:
	ldr	r5, [r4]
	tst	r5, #1
	beq	wait_dpll3_lock

	ldr	r4, cm_idlest1_core
wait_sdrc_ready:
	ldr	r5, [r4]
	tst	r5, #0x2
	bne	wait_sdrc_ready
	/* allow DLL powerdown upon hw idle req */
	ldr	r4, sdrc_power
	ldr	r5, [r4]
	bic	r5, r5, #0x40
	str	r5, [r4]

is_dll_in_lock_mode:
	/* Is dll in lock mode? */
	ldr	r4, sdrc_dlla_ctrl
	ldr	r5, [r4]
	tst	r5, #0x4
	bne	exit_nonoff_modes	@ Return if locked
	/* wait till dll locks */
wait_dll_lock_timed:
	ldr	r4, sdrc_dlla_status
	/* Wait 20uS for lock */
	mov	r6, #8
wait_dll_lock:
	subs	r6, r6, #0x1
	beq	kick_dll
	ldr	r5, [r4]
	and	r5, r5, #0x4
	cmp	r5, #0x4
	bne	wait_dll_lock
	b	exit_nonoff_modes	@ Return when locked

	/* disable/reenable DLL if not locked */
kick_dll:
	ldr	r4, sdrc_dlla_ctrl
	ldr	r5, [r4]
	mov	r6, r5
	bic	r6, #(1<<3)		@ disable dll
	str	r6, [r4]
	dsb
	orr	r6, r6, #(1<<3)		@ enable dll
	str	r6, [r4]
	dsb
	b	wait_dll_lock_timed

exit_nonoff_modes:
	/* Re-enable C-bit if needed */
	mrc	p15, 0, r0, c1, c0, 0
	tst	r0, #(1 << 2)		@ Check C bit enabled?
	orreq	r0, r0, #(1 << 2)	@ Enable the C bit if cleared
	mcreq	p15, 0, r0, c1, c0, 0
	isb

/*
 * ===================================
 * == Exit point from non-OFF modes ==
 * ===================================
 */
	ldmfd	sp!, {r4 - r11, pc}	@ restore regs and return
ENDPROC(omap3_do_wfi)
sdrc_power:
	.word	SDRC_POWER_V
cm_idlest1_core:
	.word	CM_IDLEST1_CORE_V
cm_idlest_ckgen:
	.word	CM_IDLEST_CKGEN_V
sdrc_dlla_status:
	.word	SDRC_DLLA_STATUS_V
sdrc_dlla_ctrl:
	.word	SDRC_DLLA_CTRL_V
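/*
 * Size in bytes of the code above; used when copying omap3_do_wfi to
 * internal SRAM (cf. omap_push_sram_idle).
 */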
ENTRY(omap3_do_wfi_sz)
	.word	. - omap3_do_wfi


/*
 * ==============================
 * == Resume path for OFF mode ==
 * ==============================
 */

/*
 * The restore_* functions are called by the ROM code
 * when back from WFI in OFF mode.
 * Cf. the get_*restore_pointer functions.
 *
 * restore_es3: applies to 34xx >= ES3.0
 * restore_3630: applies to 36xx
 * restore: common code for 3xxx
 *
 * Note: when back from CORE and MPU OFF mode we are running
 * from SDRAM, without the MMU, without the caches and without
 * branch prediction. Also the SRAM content has been cleared.
 */
ENTRY(omap3_restore_es3)
	ldr	r5, pm_prepwstst_core_p
	ldr	r4, [r5]
	and	r4, r4, #0x3
	cmp	r4, #0x0	@ Check if previous power state of CORE is OFF
	bne	omap3_restore	@ If not, jump to the OMAP3 common code
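	/*
	 * CORE was in OFF: copy the es3_sdrc_fix routine (erratum i443
	 * workaround, see below) word by word into SRAM and run it there
	 * before touching the SDRAM.
	 */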
	adr	r0, es3_sdrc_fix
	ldr	r1, sram_base
	ldr	r2, es3_sdrc_fix_sz
	mov	r2, r2, ror #2		@ convert the byte count to words
copy_to_sram:
	ldmia	r0!, {r3}		@ val = *src
	stmia	r1!, {r3}		@ *dst = val
	subs	r2, r2, #0x1		@ num_words--
	bne	copy_to_sram
	ldr	r1, sram_base
	blx	r1
	b	omap3_restore		@ Continue with the OMAP3 common code
ENDPROC(omap3_restore_es3)

ENTRY(omap3_restore_3630)
	ldr	r1, pm_prepwstst_core_p
	ldr	r2, [r1]
	and	r2, r2, #0x3
	cmp	r2, #0x0	@ Check if previous power state of CORE is OFF
	bne	omap3_restore	@ If not, jump to the OMAP3 common code
	/* Disable RTA before giving control */
	ldr	r1, control_mem_rta
	mov	r2, #OMAP36XX_RTA_DISABLE
	str	r2, [r1]
ENDPROC(omap3_restore_3630)

	/* Fall through to common code for the remaining logic */


ENTRY(omap3_restore)
	/*
	 * Read the MPU PWSTCTRL register to check the reason for the MPU
	 * reset. This tells us what was lost.
	 */
	ldr	r1, pm_pwstctrl_mpu
	ldr	r2, [r1]
	and	r2, r2, #0x3
	cmp	r2, #0x0	@ Check if target power state was OFF or RET
	bne	logic_l1_restore
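	/*
	 * The target power state was OFF: deal with the L2 cache (3630
	 * disable flag, L2 invalidation through the secure ROM services)
	 * before the logic/L1 restore.
	 */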

	adr	r1, l2dis_3630_offset	@ address for offset
	ldr	r0, [r1]		@ value for offset
	ldr	r0, [r1, r0]		@ value at l2dis_3630
	cmp	r0, #0x1	@ should we disable L2 on 3630?
	bne	skipl2dis
	mrc	p15, 0, r0, c1, c0, 1
	bic	r0, r0, #2		@ disable L2 cache
	mcr	p15, 0, r0, c1, c0, 1
skipl2dis:
	ldr	r0, control_stat
	ldr	r1, [r0]
	and	r1, #0x700		@ mask the device-type field
	cmp	r1, #0x300		@ GP device?
	beq	l2_inv_gp		@ GP: invalidate L2 with the generic SMI
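	/*
	 * Not a GP device: use the PPA services of the secure ROM code to
	 * invalidate L2 and to restore the auxiliary control registers.
	 */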
	adr	r0, l2_inv_api_params_offset
	ldr	r3, [r0]
	add	r3, r3, r0		@ r3 points to dummy parameters
	mov	r0, #40			@ set service ID for PPA
	mov	r12, r0			@ copy secure service ID in r12
	mov	r1, #0			@ set task id for ROM code in r1
	mov	r2, #4			@ set some flags in r2, r6
	mov	r6, #0xff
	dsb				@ data write barrier
	dmb				@ data memory barrier
	smc	#1			@ call SMI monitor (smi #1)
	/* Write to Aux control register to set some bits */
	mov	r0, #42			@ set service ID for PPA
	mov	r12, r0			@ copy secure service ID in r12
	mov	r1, #0			@ set task id for ROM code in r1
	mov	r2, #4			@ set some flags in r2, r6
	mov	r6, #0xff
	ldr	r4, scratchpad_base
	ldr	r3, [r4, #0xBC]		@ r3 points to parameters
	dsb				@ data write barrier
	dmb				@ data memory barrier
	smc	#1			@ call SMI monitor (smi #1)

#ifdef CONFIG_OMAP3_L2_AUX_SECURE_SAVE_RESTORE
	/* Restore L2 aux control register */
					@ set service ID for PPA
	mov	r0, #CONFIG_OMAP3_L2_AUX_SECURE_SERVICE_SET_ID
	mov	r12, r0			@ copy service ID in r12
	mov	r1, #0			@ set task ID for ROM code in r1
	mov	r2, #4			@ set some flags in r2, r6
	mov	r6, #0xff
	ldr	r4, scratchpad_base
	ldr	r3, [r4, #0xBC]
	adds	r3, r3, #8		@ r3 points to parameters
	dsb				@ data write barrier
	dmb				@ data memory barrier
	smc	#1			@ call SMI monitor (smi #1)
#endif
	b	logic_l1_restore

	.align
l2_inv_api_params_offset:
	.long	l2_inv_api_params - .	@ PC-relative offset of the dummy PPA parameters
l2_inv_gp:
	/* Execute smi to invalidate L2 cache */
	mov	r12, #0x1		@ set up to invalidate L2
	smc	#0			@ Call SMI monitor (smieq)
	/* Write to Aux control register to set some bits */
	ldr	r4, scratchpad_base
	ldr	r3, [r4, #0xBC]
	ldr	r0, [r3, #4]
	mov	r12, #0x3
	smc	#0			@ Call SMI monitor (smieq)
	ldr	r4, scratchpad_base
	ldr	r3, [r4, #0xBC]
	ldr	r0, [r3, #12]
	mov	r12, #0x2
	smc	#0			@ Call SMI monitor (smieq)
logic_l1_restore:
	adr	r0, l2dis_3630_offset	@ address for offset
	ldr	r1, [r0]		@ value for offset
	ldr	r1, [r0, r1]		@ value at l2dis_3630
	cmp	r1, #0x1		@ Test if L2 re-enable needed on 3630
	bne	skipl2reen
	mrc	p15, 0, r1, c1, c0, 1
	orr	r1, r1, #2		@ re-enable L2 cache
	mcr	p15, 0, r1, c1, c0, 1
skipl2reen:

	/* Now branch to the common CPU resume function */
	b	cpu_resume
ENDPROC(omap3_restore)

	.ltorg

/*
 * Local variables
 */
pm_prepwstst_core_p:
	.word	PM_PREPWSTST_CORE_P
pm_pwstctrl_mpu:
	.word	PM_PWSTCTRL_MPU_P
scratchpad_base:
	.word	SCRATCHPAD_BASE_P
sram_base:
	.word	SRAM_BASE_P + 0x8000
control_stat:
	.word	CONTROL_STAT
control_mem_rta:
	.word	CONTROL_MEM_RTA_CTRL
l2dis_3630_offset:
	.long	l2dis_3630 - .

	.data
	.align	2
l2dis_3630:
	.word	0

	.data
	.align	2
l2_inv_api_params:
	.word	0x1, 0x00

/*
 * Internal functions
 */

/*
 * This function implements the erratum ID i443 WA; it applies to 34xx >= ES3.0.
 * It is copied to and run from SRAM in order to reconfigure the SDRC parameters.
 */
	.text
	.align	3
ENTRY(es3_sdrc_fix)
	ldr	r4, sdrc_syscfg		@ get config addr
	ldr	r5, [r4]		@ get value
	tst	r5, #0x100		@ is part access blocked
	it	eq
	biceq	r5, r5, #0x100		@ clear bit if set
	str	r5, [r4]		@ write back change
	ldr	r4, sdrc_mr_0		@ get config addr
	ldr	r5, [r4]		@ get value
	str	r5, [r4]		@ write back change
	ldr	r4, sdrc_emr2_0		@ get config addr
	ldr	r5, [r4]		@ get value
	str	r5, [r4]		@ write back change
	ldr	r4, sdrc_manual_0	@ get config addr
	mov	r5, #0x2		@ autorefresh command
	str	r5, [r4]		@ kick off refreshes
	ldr	r4, sdrc_mr_1		@ get config addr
	ldr	r5, [r4]		@ get value
	str	r5, [r4]		@ write back change
	ldr	r4, sdrc_emr2_1		@ get config addr
	ldr	r5, [r4]		@ get value
	str	r5, [r4]		@ write back change
	ldr	r4, sdrc_manual_1	@ get config addr
	mov	r5, #0x2		@ autorefresh command
	str	r5, [r4]		@ kick off refreshes
	bx	lr

/*
 * Local variables
 */
	.align
sdrc_syscfg:
	.word	SDRC_SYSCONFIG_P
sdrc_mr_0:
	.word	SDRC_MR_0_P
sdrc_emr2_0:
	.word	SDRC_EMR2_0_P
sdrc_manual_0:
	.word	SDRC_MANUAL_0_P
sdrc_mr_1:
	.word	SDRC_MR_1_P
sdrc_emr2_1:
	.word	SDRC_EMR2_1_P
sdrc_manual_1:
	.word	SDRC_MANUAL_1_P
ENDPROC(es3_sdrc_fix)
ENTRY(es3_sdrc_fix_sz)
	.word	. - es3_sdrc_fix