/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Hibernate low-level support
 *
 * Copyright (C) 2016 ARM Ltd.
 * Author: James Morse <james.morse@arm.com>
 */
#include <linux/linkage.h>
#include <linux/errno.h>

#include <asm/asm-offsets.h>
#include <asm/assembler.h>
#include <asm/cputype.h>
#include <asm/memory.h>
#include <asm/page.h>
#include <asm/virt.h>

/*
 * To prevent the possibility of old and new partial table walks being visible
 * in the TLB, switch the ttbr to a zero page when we invalidate the old
 * records. See D4.7.1 'General TLB maintenance requirements' in ARM DDI
 * 0487A.i. Even switching to our copied tables will cause a changed output
 * address at each stage of the walk.
 */
.macro break_before_make_ttbr_switch zero_page, page_table, tmp, tmp2
	/* Break: point ttbr1 at the empty zero page... */
	phys_to_ttbr \tmp, \zero_page
	msr	ttbr1_el1, \tmp
	isb
	/* ...and flush the stale walks. nsh suffices: we run alone here. */
	tlbi	vmalle1
	dsb	nsh
	/* Make: install the new tables. */
	phys_to_ttbr \tmp, \page_table
	offset_ttbr1 \tmp, \tmp2
	msr	ttbr1_el1, \tmp
	isb
.endm
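
/*
 * Used twice below: first to swap the old kernel's ttbr1 for our copied
 * linear map (zero page in x5, temporary tables in x0), and again at the
 * end to install the restored kernel's swapper_pg_dir.
 */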

/*
 * Resume from hibernate
 *
 * Loads temporary page tables, then restores the memory image.
 * Finally branches to cpu_resume() to restore the state saved by
 * swsusp_arch_suspend().
 *
 * Because this code has to be copied to a 'safe' page, it can't call out to
 * other functions by PC-relative address. Also remember that it may be
 * mid-way through over-writing other functions. For this reason it contains
 * code from flush_icache_range() and uses the copy_page() macro.
 *
 * This 'safe' page is mapped via ttbr0, and executed from there. This function
 * switches to a copy of the linear map in ttbr1, performs the restore, then
 * switches ttbr1 to the original kernel's swapper_pg_dir.
 *
 * All of memory gets written to, including code. We need to clean the kernel
 * text to the Point of Coherence (PoC) before secondary cores can be booted.
 * Because the kernel modules and executable pages mapped to user space are
 * also written as data, we clean all pages we touch to the Point of
 * Unification (PoU).
 *
 * x0: physical address of temporary page tables
 * x1: physical address of swapper page tables
 * x2: address of cpu_resume
 * x3: linear map address of restore_pblist in the current kernel
 * x4: physical address of __hyp_stub_vectors, or 0
 * x5: physical address of a zero page that remains zero after resume
 */
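
/*
 * For reference, the C side (swsusp_arch_resume() in hibernate.c) copies
 * this section into the safe page and branches to the copy with roughly:
 *
 *	hibernate_exit(virt_to_phys(tmp_pg_dir), resume_hdr.ttbr1_el1,
 *		       resume_hdr.reenter_kernel, restore_pblist,
 *		       resume_hdr.__hyp_stub_vectors, virt_to_phys(zero_page));
 */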
.pushsection	".hibernate_exit.text", "ax"
SYM_CODE_START(swsusp_arch_suspend_exit)
	/*
	 * We execute from ttbr0, change ttbr1 to our copied linear map tables
	 * with a break-before-make via the zero page
	 */
	break_before_make_ttbr_switch	x5, x0, x6, x8

	/* copy_page() clobbers x0-x9, so stash the arguments out of its way */
	mov	x21, x1		/* swapper_pg_dir, restored below */
	mov	x30, x2		/* cpu_resume: the 'ret' below branches there */
	mov	x24, x4		/* __hyp_stub_vectors, or 0 */
	mov	x25, x5		/* the zero page, for the second switch */

	/* walk the restore_pblist and use copy_page() to over-write memory */
	mov	x19, x3
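
	/*
	 * Each restore_pblist entry is a struct pbe (<linux/suspend.h>):
	 *
	 *	struct pbe {
	 *		void *address;		// address of the copy
	 *		void *orig_address;	// original address of the page
	 *		struct pbe *next;
	 *	};
	 *
	 * The HIBERN_PBE_* offsets below are generated by asm-offsets.c.
	 */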
1:	ldr	x10, [x19, #HIBERN_PBE_ORIG]
	mov	x0, x10			/* destination: the original location */
	ldr	x1, [x19, #HIBERN_PBE_ADDR]	/* source: the safe copy */

	copy_page	x0, x1, x2, x3, x4, x5, x6, x7, x8, x9

	add	x1, x10, #PAGE_SIZE
	/* Clean the copied page to PoU - based on flush_icache_range() */
	raw_dcache_line_size x2, x3
	sub	x3, x2, #1		/* line-size mask */
	bic	x4, x10, x3		/* round down to a cache-line boundary */
2:	dc	cvau, x4		/* clean D line / unified line */
	add	x4, x4, x2
	cmp	x4, x1
	b.lo	2b

	ldr	x19, [x19, #HIBERN_PBE_NEXT]
	cbnz	x19, 1b
	dsb	ish			/* wait for PoU cleaning to finish */

	/* switch to the restored kernel's page tables */
	break_before_make_ttbr_switch	x25, x21, x6, x8

	/* make the restored kernel text visible to instruction fetch */
	ic	ialluis
	dsb	ish
	isb

	cbz	x24, 3f		/* Do we need to re-initialise EL2? */
	hvc	#0		/* taken to el1_sync below to restore the hyp stub */
3:	ret
SYM_CODE_END(swsusp_arch_suspend_exit)

/*
 * Restore the hyp stub.
 * This must be done before the hibernate page is unmapped by _cpu_resume(),
 * but happens before any of the hyp-stub's code is cleaned to PoC.
 *
 * x24: The physical address of __hyp_stub_vectors
 */
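
/*
 * Entered via the 'hvc #0' in swsusp_arch_suspend_exit: with
 * hibernate_el2_vectors installed, the call traps to the 64-bit EL1
 * synchronous vector below, which points vbar_el2 back at the real
 * hyp stub and returns to EL1.
 */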
SYM_CODE_START_LOCAL(el1_sync)
	msr	vbar_el2, x24
	eret
SYM_CODE_END(el1_sync)

.macro invalid_vector	label
SYM_CODE_START_LOCAL(\label)
	b	\label		/* spin - this exception is unexpected here */
SYM_CODE_END(\label)
.endm

	invalid_vector	el2_sync_invalid
	invalid_vector	el2_irq_invalid
	invalid_vector	el2_fiq_invalid
	invalid_vector	el2_error_invalid
	invalid_vector	el1_sync_invalid
	invalid_vector	el1_irq_invalid
	invalid_vector	el1_fiq_invalid
	invalid_vector	el1_error_invalid

/* el2 vectors - installed at EL2 while we restore the memory image */
	.align 11
SYM_CODE_START(hibernate_el2_vectors)
	ventry	el2_sync_invalid		// Synchronous EL2t
	ventry	el2_irq_invalid			// IRQ EL2t
	ventry	el2_fiq_invalid			// FIQ EL2t
	ventry	el2_error_invalid		// Error EL2t

	ventry	el2_sync_invalid		// Synchronous EL2h
	ventry	el2_irq_invalid			// IRQ EL2h
	ventry	el2_fiq_invalid			// FIQ EL2h
	ventry	el2_error_invalid		// Error EL2h

	ventry	el1_sync			// Synchronous 64-bit EL1
	ventry	el1_irq_invalid			// IRQ 64-bit EL1
	ventry	el1_fiq_invalid			// FIQ 64-bit EL1
	ventry	el1_error_invalid		// Error 64-bit EL1

	ventry	el1_sync_invalid		// Synchronous 32-bit EL1
	ventry	el1_irq_invalid			// IRQ 32-bit EL1
	ventry	el1_fiq_invalid			// FIQ 32-bit EL1
	ventry	el1_error_invalid		// Error 32-bit EL1
SYM_CODE_END(hibernate_el2_vectors)

.popsection