^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2) * Copyright (C) 2014 Marvell
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4) * Thomas Petazzoni <thomas.petazzoni@free-electrons.com>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5) * Gregory Clement <gregory.clement@free-electrons.com>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7) * This file is licensed under the terms of the GNU General Public
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8) * License version 2. This program is licensed "as is" without any
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9) * warranty of any kind, whether express or implied.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12) #include <linux/linkage.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13) #include <asm/assembler.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15)
/*
 * void armada_38x_scu_power_up(void)
 *
 * Switch this CPU's entry in the SCU CPU Power Status Register back
 * to Normal mode, so the CPU rejoins coherency after deep idle.
 * Clobbers: r0, r1, flags.  Must run with the SCU accessible at the
 * address reported by CP15 (CBAR).
 */
ENTRY(armada_38x_scu_power_up)
	mrc	p15, 4, r1, c15, c0	@ get SCU base address (CBAR, opc2
					@ defaults to 0)
	orr	r1, r1, #0x8		@ SCU CPU Power Status Register
					@ (offset 0x08 from SCU base)
	mrc	p15, 0, r0, cr0, cr0, 5	@ get the CPU ID (MPIDR)
	and	r0, r0, #15		@ keep the CPU number only
	add	r1, r1, r0		@ one status byte per CPU
	mov	r0, #0x0		@ 0 = Normal (powered-up) state
	strb	r0, [r1]		@ switch SCU power state to Normal mode
	ret	lr
ENDPROC(armada_38x_scu_power_up)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 26)
/*
 * This is the entry point through which CPUs exiting cpuidle deep
 * idle state are going.  It re-enables SMP coherency for this CPU
 * and then enters the generic ARM cpu_resume path (tail branch, so
 * cpu_resume returns on our behalf).
 */
ENTRY(armada_370_xp_cpu_resume)
ARM_BE8(setend	be )			@ go BE8 if entered LE
	/*
	 * Disable the MMU that might have been enabled in BootROM if
	 * this code is used in the resume path of a suspend/resume
	 * cycle.
	 */
	mrc	p15, 0, r1, c1, c0, 0	@ read SCTLR
	bic	r1, #1			@ clear M bit (MMU enable)
	mcr	p15, 0, r1, c1, c0, 0	@ write back: MMU off
	bl	ll_add_cpu_to_smp_group	@ rejoin the SMP group
	bl	ll_enable_coherency	@ re-enable cache coherency
	b	cpu_resume		@ tail-call generic resume
ENDPROC(armada_370_xp_cpu_resume)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 45)
/*
 * Resume entry point for Armada 38x: invalidate the L1 cache, put
 * this CPU's SCU power status back to Normal, then enter the generic
 * ARM cpu_resume path (tail branch).
 */
ENTRY(armada_38x_cpu_resume)
	/*
	 * NOTE(review): original comment questioned whether the BE8
	 * switch is needed on Armada 38x -- kept as-is, to be confirmed.
	 */
ARM_BE8(setend	be )			@ go BE8 if entered LE
	bl	v7_invalidate_l1	@ L1 contents are stale after idle
	bl	armada_38x_scu_power_up	@ SCU status -> Normal for this CPU
	b	cpu_resume		@ tail-call generic resume
ENDPROC(armada_38x_cpu_resume)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 53)
	.global mvebu_boot_wa_start
	.global mvebu_boot_wa_end

/*
 * Boot work-around stub.  The span [mvebu_boot_wa_start,
 * mvebu_boot_wa_end) is copied into SRAM and executed from there:
 * it dereferences the resume-address register (whose physical
 * address is patched into the final word below after the copy) and
 * jumps to the value found there.  Must stay position-independent
 * and its byte layout must not change.
 */
/* The following code will be executed from SRAM */
ENTRY(mvebu_boot_wa_start)
ARM_BE8(setend	be)
	adr	r0, 1f			@ r0 = address of the patched word
	ldr	r0, [r0]		@ load the address of the
					@ resume register
	ldr	r0, [r0]		@ load the value in the
					@ resume register
ARM_BE8(rev	r0, r0)			@ the value is stored LE
	mov	pc, r0			@ jump to this value
/*
 * the last word of this piece of code will be filled by the physical
 * address of the boot address register just after being copied in SRAM
 */
1:
	.long	.
mvebu_boot_wa_end:
ENDPROC(mvebu_boot_wa_end)