Orange Pi 5 kernel

Deprecated Linux kernel 5.10.110 for OrangePi 5/5B/5+ boards

3 Commits   0 Branches   0 Tags
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   1) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   2)  * Low level PM code for TI EMIF
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   3)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   4)  * Copyright (C) 2016-2017 Texas Instruments Incorporated - http://www.ti.com/
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   5)  *	Dave Gerlach
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   6)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   7)  * This program is free software; you can redistribute it and/or
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   8)  * modify it under the terms of the GNU General Public License as
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   9)  * published by the Free Software Foundation version 2.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  10)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  11)  * This program is distributed "as is" WITHOUT ANY WARRANTY of any
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  12)  * kind, whether express or implied; without even the implied warranty
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  13)  * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  14)  * GNU General Public License for more details.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  15)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  16) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  17) #include <linux/linkage.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  18) #include <asm/assembler.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  19) #include <asm/memory.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  20) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  21) #include "emif.h"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  22) #include "ti-emif-asm-offsets.h"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  23) 
/* PWR_MGMT_CTRL field values: self-refresh timer (enter SR after 8192
 * memory-clock idle cycles) and the low-power mode select field.
 */
#define EMIF_POWER_MGMT_WAIT_SELF_REFRESH_8192_CYCLES	0x00a0
#define EMIF_POWER_MGMT_SR_TIMER_MASK			0x00f0
#define EMIF_POWER_MGMT_SELF_REFRESH_MODE		0x0200
#define EMIF_POWER_MGMT_SELF_REFRESH_MODE_MASK		0x0700

/* SDRAM type field of EMIF_SDRAM_CONFIG; parenthesized so the macros
 * remain safe if used inside a larger expression.
 */
#define EMIF_SDCFG_TYPE_DDR2				(0x2 << SDRAM_TYPE_SHIFT)
#define EMIF_SDCFG_TYPE_DDR3				(0x3 << SDRAM_TYPE_SHIFT)
#define EMIF_STATUS_READY				0x4

/* Byte size of the EXT_PHY_CTRL register block copied by the
 * save/restore loops below.
 */
#define AM43XX_EMIF_PHY_CTRL_REG_COUNT                  0x120

#define EMIF_AM437X_REGISTERS				0x1
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  36) 
	.arm				@ assemble ARM (not Thumb) instructions
	.align 3			@ 8-byte align the start of the blob

/* Start marker for this code/data region; its total size is recorded at
 * ti_emif_sram_sz below (NOTE(review): presumably so a caller can copy
 * the whole region into SRAM — confirm against the C side).
 */
ENTRY(ti_emif_sram)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  41) 
/*
 * void ti_emif_save_context(void)
 *
 * Used during suspend to save the context of all required EMIF registers
 * to local memory if the EMIF is going to lose context during the sleep
 * transition. Operates on the VIRTUAL address of the EMIF.
 *
 * Clobbers: r0-r5 (r4/r5 restored from the stack on exit).
 */
ENTRY(ti_emif_save_context)
	stmfd   sp!, {r4 - r11, lr}     @ save registers on stack

	adr	r4, ti_emif_pm_sram_data	@ r4 = PC-relative parameter area
	ldr	r0, [r4, #EMIF_PM_BASE_ADDR_VIRT_OFFSET]	@ r0 = EMIF base (virt)
	ldr	r2, [r4, #EMIF_PM_REGS_VIRT_OFFSET]	@ r2 = context save area

	/* Save EMIF configuration */
	ldr	r1, [r0, #EMIF_SDRAM_CONFIG]
	str	r1, [r2, #EMIF_SDCFG_VAL_OFFSET]

	ldr	r1, [r0, #EMIF_SDRAM_REFRESH_CONTROL]
	str	r1, [r2, #EMIF_REF_CTRL_VAL_OFFSET]

	ldr	r1, [r0, #EMIF_SDRAM_TIMING_1]
	str     r1, [r2, #EMIF_TIMING1_VAL_OFFSET]

	ldr	r1, [r0, #EMIF_SDRAM_TIMING_2]
	str     r1, [r2, #EMIF_TIMING2_VAL_OFFSET]

	ldr	r1, [r0, #EMIF_SDRAM_TIMING_3]
	str     r1, [r2, #EMIF_TIMING3_VAL_OFFSET]

	ldr	r1, [r0, #EMIF_POWER_MANAGEMENT_CONTROL]
	str     r1, [r2, #EMIF_PMCR_VAL_OFFSET]

	ldr	r1, [r0, #EMIF_POWER_MANAGEMENT_CTRL_SHDW]
	str     r1, [r2, #EMIF_PMCR_SHDW_VAL_OFFSET]

	ldr	r1, [r0, #EMIF_SDRAM_OUTPUT_IMPEDANCE_CALIBRATION_CONFIG]
	str     r1, [r2, #EMIF_ZQCFG_VAL_OFFSET]

	ldr	r1, [r0, #EMIF_DDR_PHY_CTRL_1]
	str     r1, [r2, #EMIF_DDR_PHY_CTLR_1_OFFSET]

	ldr	r1, [r0, #EMIF_COS_CONFIG]
	str     r1, [r2, #EMIF_COS_CONFIG_OFFSET]

	ldr	r1, [r0, #EMIF_PRIORITY_TO_CLASS_OF_SERVICE_MAPPING]
	str     r1, [r2, #EMIF_PRIORITY_TO_COS_MAPPING_OFFSET]

	ldr	r1, [r0, #EMIF_CONNECTION_ID_TO_CLASS_OF_SERVICE_1_MAPPING]
	str     r1, [r2, #EMIF_CONNECT_ID_SERV_1_MAP_OFFSET]

	ldr	r1, [r0, #EMIF_CONNECTION_ID_TO_CLASS_OF_SERVICE_2_MAPPING]
	str     r1, [r2, #EMIF_CONNECT_ID_SERV_2_MAP_OFFSET]

	ldr	r1, [r0, #EMIF_OCP_CONFIG]
	str     r1, [r2, #EMIF_OCP_CONFIG_VAL_OFFSET]

	/* Additional registers exist only on the AM43xx register layout */
	ldr	r5, [r4, #EMIF_PM_CONFIG_OFFSET]
	cmp	r5, #EMIF_SRAM_AM43_REG_LAYOUT
	bne	emif_skip_save_extra_regs

	ldr	r1, [r0, #EMIF_READ_WRITE_LEVELING_RAMP_CONTROL]
	str     r1, [r2, #EMIF_RD_WR_LEVEL_RAMP_CTRL_OFFSET]

	ldr	r1, [r0, #EMIF_READ_WRITE_EXECUTION_THRESHOLD]
	str     r1, [r2, #EMIF_RD_WR_EXEC_THRESH_OFFSET]

	ldr	r1, [r0, #EMIF_LPDDR2_NVM_TIMING]
	str     r1, [r2, #EMIF_LPDDR2_NVM_TIM_OFFSET]

	ldr	r1, [r0, #EMIF_LPDDR2_NVM_TIMING_SHDW]
	str     r1, [r2, #EMIF_LPDDR2_NVM_TIM_SHDW_OFFSET]

	ldr	r1, [r0, #EMIF_DLL_CALIB_CTRL]
	str     r1, [r2, #EMIF_DLL_CALIB_CTRL_VAL_OFFSET]

	ldr	r1, [r0, #EMIF_DLL_CALIB_CTRL_SHDW]
	str     r1, [r2, #EMIF_DLL_CALIB_CTRL_VAL_SHDW_OFFSET]

	/* Loop and save entire block of emif phy regs */
	mov	r5, #0x0			@ r5 = byte offset into PHY block
	add	r4, r2, #EMIF_EXT_PHY_CTRL_VALS_OFFSET	@ r4 = save-space base
	add	r3, r0, #EMIF_EXT_PHY_CTRL_1	@ r3 = first ext PHY ctrl reg
ddr_phy_ctrl_save:
	ldr	r1, [r3, r5]
	str	r1, [r4, r5]
	add	r5, r5, #0x4
	cmp	r5, #AM43XX_EMIF_PHY_CTRL_REG_COUNT	@ whole block copied?
	bne	ddr_phy_ctrl_save

emif_skip_save_extra_regs:
	ldmfd	sp!, {r4 - r11, pc}	@ restore regs and return
ENDPROC(ti_emif_save_context)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 135) 
/*
 * void ti_emif_restore_context(void)
 *
 * Used during resume to restore the context of all required EMIF registers
 * from local memory after the EMIF has lost context during a sleep transition.
 * Operates on the PHYSICAL address of the EMIF.
 *
 * NOTE(review): unlike ti_emif_save_context this does not use the stack
 * and returns with r1-r5 clobbered — presumably because DDR (and hence a
 * stack) may not be usable yet on this path; confirm callers tolerate it.
 */
ENTRY(ti_emif_restore_context)
	adr	r4, ti_emif_pm_sram_data	@ r4 = PC-relative parameter area
	ldr	r0, [r4, #EMIF_PM_BASE_ADDR_PHYS_OFFSET]	@ r0 = EMIF base (phys)
	ldr	r2, [r4, #EMIF_PM_REGS_PHYS_OFFSET]	@ r2 = saved context (phys)

	/* Config EMIF Timings: each value is written to both the active
	 * register and its shadow register. */
	ldr     r1, [r2, #EMIF_DDR_PHY_CTLR_1_OFFSET]
	str	r1, [r0, #EMIF_DDR_PHY_CTRL_1]
	str	r1, [r0, #EMIF_DDR_PHY_CTRL_1_SHDW]

	ldr     r1, [r2, #EMIF_TIMING1_VAL_OFFSET]
	str	r1, [r0, #EMIF_SDRAM_TIMING_1]
	str	r1, [r0, #EMIF_SDRAM_TIMING_1_SHDW]

	ldr     r1, [r2, #EMIF_TIMING2_VAL_OFFSET]
	str	r1, [r0, #EMIF_SDRAM_TIMING_2]
	str	r1, [r0, #EMIF_SDRAM_TIMING_2_SHDW]

	ldr     r1, [r2, #EMIF_TIMING3_VAL_OFFSET]
	str	r1, [r0, #EMIF_SDRAM_TIMING_3]
	str	r1, [r0, #EMIF_SDRAM_TIMING_3_SHDW]

	ldr     r1, [r2, #EMIF_REF_CTRL_VAL_OFFSET]
	str	r1, [r0, #EMIF_SDRAM_REFRESH_CONTROL]
	str	r1, [r0, #EMIF_SDRAM_REFRESH_CTRL_SHDW]

	ldr     r1, [r2, #EMIF_PMCR_VAL_OFFSET]
	str	r1, [r0, #EMIF_POWER_MANAGEMENT_CONTROL]

	ldr     r1, [r2, #EMIF_PMCR_SHDW_VAL_OFFSET]
	str	r1, [r0, #EMIF_POWER_MANAGEMENT_CTRL_SHDW]

	ldr     r1, [r2, #EMIF_COS_CONFIG_OFFSET]
	str	r1, [r0, #EMIF_COS_CONFIG]

	ldr     r1, [r2, #EMIF_PRIORITY_TO_COS_MAPPING_OFFSET]
	str	r1, [r0, #EMIF_PRIORITY_TO_CLASS_OF_SERVICE_MAPPING]

	ldr	r1, [r2, #EMIF_CONNECT_ID_SERV_1_MAP_OFFSET]
	str	r1, [r0, #EMIF_CONNECTION_ID_TO_CLASS_OF_SERVICE_1_MAPPING]

	ldr     r1, [r2, #EMIF_CONNECT_ID_SERV_2_MAP_OFFSET]
	str	r1, [r0, #EMIF_CONNECTION_ID_TO_CLASS_OF_SERVICE_2_MAPPING]

	ldr     r1, [r2, #EMIF_OCP_CONFIG_VAL_OFFSET]
	str	r1, [r0, #EMIF_OCP_CONFIG]

	/* Additional registers exist only on the AM43xx register layout */
	ldr	r5, [r4, #EMIF_PM_CONFIG_OFFSET]
	cmp	r5, #EMIF_SRAM_AM43_REG_LAYOUT
	bne	emif_skip_restore_extra_regs

	ldr     r1, [r2, #EMIF_RD_WR_LEVEL_RAMP_CTRL_OFFSET]
	str	r1, [r0, #EMIF_READ_WRITE_LEVELING_RAMP_CONTROL]

	ldr     r1, [r2, #EMIF_RD_WR_EXEC_THRESH_OFFSET]
	str	r1, [r0, #EMIF_READ_WRITE_EXECUTION_THRESHOLD]

	ldr     r1, [r2, #EMIF_LPDDR2_NVM_TIM_OFFSET]
	str	r1, [r0, #EMIF_LPDDR2_NVM_TIMING]

	ldr     r1, [r2, #EMIF_LPDDR2_NVM_TIM_SHDW_OFFSET]
	str	r1, [r0, #EMIF_LPDDR2_NVM_TIMING_SHDW]

	ldr     r1, [r2, #EMIF_DLL_CALIB_CTRL_VAL_OFFSET]
	str	r1, [r0, #EMIF_DLL_CALIB_CTRL]

	ldr     r1, [r2, #EMIF_DLL_CALIB_CTRL_VAL_SHDW_OFFSET]
	str	r1, [r0, #EMIF_DLL_CALIB_CTRL_SHDW]

	ldr     r1, [r2, #EMIF_ZQCFG_VAL_OFFSET]
	str	r1, [r0, #EMIF_SDRAM_OUTPUT_IMPEDANCE_CALIBRATION_CONFIG]

	/* Loop and restore entire block of emif phy regs */
	mov	r5, #0x0			@ r5 = byte offset into PHY block
	/* Load ti_emif_regs_amx3 + EMIF_EXT_PHY_CTRL_VALS_OFFSET for address
	 * to phy register save space
	 */
	add	r3, r2, #EMIF_EXT_PHY_CTRL_VALS_OFFSET
	add	r4, r0, #EMIF_EXT_PHY_CTRL_1
ddr_phy_ctrl_restore:
	ldr	r1, [r3, r5]
	str	r1, [r4, r5]
	add	r5, r5, #0x4
	cmp	r5, #AM43XX_EMIF_PHY_CTRL_REG_COUNT	@ whole block copied?
	bne	ddr_phy_ctrl_restore

emif_skip_restore_extra_regs:
	/*
	 * Output impedance calib needed only for DDR3
	 * but since the initial state of this will be
	 * disabled for DDR2 no harm in restoring the
	 * old configuration
	 */
	ldr     r1, [r2, #EMIF_ZQCFG_VAL_OFFSET]
	str	r1, [r0, #EMIF_SDRAM_OUTPUT_IMPEDANCE_CALIBRATION_CONFIG]

	/* Write to sdcfg last for DDR2 only */
	ldr	r1, [r2, #EMIF_SDCFG_VAL_OFFSET]
	and	r2, r1, #SDRAM_TYPE_MASK	@ extract SDRAM type field
	cmp	r2, #EMIF_SDCFG_TYPE_DDR2
	streq	r1, [r0, #EMIF_SDRAM_CONFIG]	@ only written when type == DDR2

	mov	pc, lr
ENDPROC(ti_emif_restore_context)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 247) 
/*
 * void ti_emif_run_hw_leveling(void)
 *
 * Used during resume to run hardware leveling again and restore the
 * configuration of the EMIF PHY, only for DDR3.
 *
 * NOTE(review): no stack used; clobbers r0-r4 and returns via lr.
 */
ENTRY(ti_emif_run_hw_leveling)
	adr	r4, ti_emif_pm_sram_data	@ r4 = PC-relative parameter area
	ldr	r0, [r4, #EMIF_PM_BASE_ADDR_PHYS_OFFSET]	@ r0 = EMIF base (phys)

	ldr	r3, [r0, #EMIF_READ_WRITE_LEVELING_CONTROL]
	orr	r3, r3, #RDWRLVLFULL_START	@ prepare "start full leveling" value

	/* Leveling applies to DDR3 only; skip entirely otherwise */
	ldr	r2, [r0, #EMIF_SDRAM_CONFIG]
	and	r2, r2, #SDRAM_TYPE_MASK
	cmp	r2, #EMIF_SDCFG_TYPE_DDR3
	bne	skip_hwlvl

	str	r3, [r0, #EMIF_READ_WRITE_LEVELING_CONTROL]	@ kick off leveling

	/*
	 * If EMIF registers are touched during initial stage of HW
	 * leveling sequence there will be an L3 NOC timeout error issued
	 * as the EMIF will not respond, which is not fatal, but it is
	 * avoidable. This small wait loop is enough time for this condition
	 * to clear, even at worst case of CPU running at max speed of 1Ghz.
	 */
	mov	r2, #0x2000			@ fixed delay: 0x2000 countdown
1:
	subs	r2, r2, #0x1
	bne	1b

	/* Bit clears when operation is complete */
2:	ldr     r1, [r0, #EMIF_READ_WRITE_LEVELING_CONTROL]
	tst     r1, #RDWRLVLFULL_START
	bne     2b				@ poll until START bit clears

skip_hwlvl:
	mov	pc, lr
ENDPROC(ti_emif_run_hw_leveling)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 287) 
/*
 * void ti_emif_enter_sr(void)
 *
 * Programs the EMIF to tell the SDRAM to enter into self-refresh
 * mode during a sleep transition. Operates on the VIRTUAL address
 * of the EMIF.
 */
ENTRY(ti_emif_enter_sr)
	stmfd   sp!, {r4 - r11, lr}     @ save registers on stack

	adr	r4, ti_emif_pm_sram_data	@ r4 = PC-relative parameter area
	ldr	r0, [r4, #EMIF_PM_BASE_ADDR_VIRT_OFFSET]	@ r0 = EMIF base (virt)
	ldr	r2, [r4, #EMIF_PM_REGS_VIRT_OFFSET]

	/* Read-modify-write PWR_MGMT_CTRL: set LP mode field to self-refresh */
	ldr	r1, [r0, #EMIF_POWER_MANAGEMENT_CONTROL]
	bic	r1, r1, #EMIF_POWER_MGMT_SELF_REFRESH_MODE_MASK	@ clear mode field
	orr	r1, r1, #EMIF_POWER_MGMT_SELF_REFRESH_MODE	@ select self-refresh
	str	r1, [r0, #EMIF_POWER_MANAGEMENT_CONTROL]

	ldmfd	sp!, {r4 - r11, pc}	@ restore regs and return
ENDPROC(ti_emif_enter_sr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 309) 
/*
 * void ti_emif_exit_sr(void)
 *
 * Programs the EMIF to tell the SDRAM to exit self-refresh mode
 * after a sleep transition. Operates on the PHYSICAL address of
 * the EMIF.
 *
 * NOTE(review): no stack used; clobbers r0-r2, r4.
 */
ENTRY(ti_emif_exit_sr)
	adr	r4, ti_emif_pm_sram_data	@ r4 = PC-relative parameter area
	ldr	r0, [r4, #EMIF_PM_BASE_ADDR_PHYS_OFFSET]	@ r0 = EMIF base (phys)
	ldr	r2, [r4, #EMIF_PM_REGS_PHYS_OFFSET]	@ r2 = saved context (phys)

	/*
	 * Toggle EMIF to exit refresh mode:
	 * if EMIF lost context, PWR_MGT_CTRL is currently 0, writing disable
	 *   (0x0), wont do diddly squat! so do a toggle from SR(0x2) to disable
	 *   (0x0) here.
	 * *If* EMIF did not lose context, nothing broken as we write the same
	 *   value(0x2) to reg before we write a disable (0x0).
	 */
	ldr	r1, [r2, #EMIF_PMCR_VAL_OFFSET]	@ start from saved PMCR value
	bic	r1, r1, #EMIF_POWER_MGMT_SELF_REFRESH_MODE_MASK
	orr	r1, r1, #EMIF_POWER_MGMT_SELF_REFRESH_MODE	@ first write: SR (0x2)
	str	r1, [r0, #EMIF_POWER_MANAGEMENT_CONTROL]
	bic	r1, r1, #EMIF_POWER_MGMT_SELF_REFRESH_MODE_MASK	@ then: disable (0x0)
	str	r1, [r0, #EMIF_POWER_MANAGEMENT_CONTROL]

        /* Wait for EMIF to become ready */
1:	ldr     r1, [r0, #EMIF_STATUS]
	tst     r1, #EMIF_STATUS_READY
	beq     1b				@ spin until READY bit is set

	mov	pc, lr
ENDPROC(ti_emif_exit_sr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 344) 
/*
 * void ti_emif_abort_sr(void)
 *
 * Disables self-refresh after a failed transition to a low-power
 * state so the kernel can jump back to DDR and follow abort path.
 * Operates on the VIRTUAL address of the EMIF.
 */
ENTRY(ti_emif_abort_sr)
	stmfd   sp!, {r4 - r11, lr}     @ save registers on stack

	adr	r4, ti_emif_pm_sram_data	@ r4 = PC-relative parameter area
	ldr	r0, [r4, #EMIF_PM_BASE_ADDR_VIRT_OFFSET]	@ r0 = EMIF base (virt)
	ldr	r2, [r4, #EMIF_PM_REGS_VIRT_OFFSET]	@ r2 = saved context (virt)

	/* Restore saved PMCR with the self-refresh mode field cleared */
	ldr	r1, [r2, #EMIF_PMCR_VAL_OFFSET]
	bic	r1, r1, #EMIF_POWER_MGMT_SELF_REFRESH_MODE_MASK
	str	r1, [r0, #EMIF_POWER_MANAGEMENT_CONTROL]

	/* Wait for EMIF to become ready */
1:	ldr     r1, [r0, #EMIF_STATUS]
	tst     r1, #EMIF_STATUS_READY
	beq     1b				@ spin until READY bit is set

	ldmfd	sp!, {r4 - r11, pc}	@ restore regs and return
ENDPROC(ti_emif_abort_sr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 370) 
	.align 3
/* Runtime-populated parameter area (EMIF base addresses and pointers to
 * the register save space) read via the EMIF_PM_*_OFFSET fields above.
 */
ENTRY(ti_emif_pm_sram_data)
	.space EMIF_PM_DATA_SIZE
/* Size word: distance from ti_emif_save_context to here — note it is
 * measured from ti_emif_save_context, not the ti_emif_sram start marker.
 */
ENTRY(ti_emif_sram_sz)
        .word   . - ti_emif_save_context