/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
 * PowerPC version
 * Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
 *
 * Rewritten by Cort Dougan (cort@cs.nmt.edu) for PReP
 * Copyright (C) 1996 Cort Dougan <cort@cs.nmt.edu>
 * Adapted for Power Macintosh by Paul Mackerras.
 * Low-level exception handlers and MMU support
 * rewritten by Paul Mackerras.
 * Copyright (C) 1996 Paul Mackerras.
 *
 * Adapted for 64bit PowerPC by Dave Engebretsen, Peter Bergner, and
 * Mike Corrigan {engebret|bergner|mikejc}@us.ibm.com
 *
 * This file contains the entry point for the 64-bit kernel along
 * with some early initialization code common to all 64-bit powerpc
 * variants.
 */

#include <linux/threads.h>
#include <linux/init.h>
#include <asm/reg.h>
#include <asm/page.h>
#include <asm/mmu.h>
#include <asm/ppc_asm.h>
#include <asm/head-64.h>
#include <asm/asm-offsets.h>
#include <asm/bug.h>
#include <asm/cputable.h>
#include <asm/setup.h>
#include <asm/hvcall.h>
#include <asm/thread_info.h>
#include <asm/firmware.h>
#include <asm/page_64.h>
#include <asm/irqflags.h>
#include <asm/kvm_book3s_asm.h>
#include <asm/ptrace.h>
#include <asm/hw_irq.h>
#include <asm/cputhreads.h>
#include <asm/ppc-opcode.h>
#include <asm/export.h>
#include <asm/feature-fixups.h>

/* The physical memory is laid out such that the secondary processor
 * spin code sits at 0x0000...0x00ff. On server, the vectors follow
 * using the layout described in exceptions-64s.S
 */

/*
 * Entering into this code we make the following assumptions:
 *
 *  For pSeries or server processors:
 *   1. The MMU is off & open firmware is running in real mode.
 *   2. The primary CPU enters at __start.
 *   3. If the RTAS supports "query-cpu-stopped-state", then secondary
 *      CPUs will enter as directed by the "start-cpu" RTAS call, which is
 *      generic_secondary_smp_init, with PIR in r3.
 *   4. Else the secondary CPUs will enter at secondary_hold (0x60) as
 *      directed by the "start-cpu" RTAS call, with PIR in r3.
 *   -or- For OPAL entry:
 *   1. The MMU is off, processor in HV mode.
 *   2. The primary CPU enters at 0 with device-tree in r3, OPAL base
 *      in r8, and entry in r9 for debugging purposes.
 *   3. Secondary CPUs enter as directed by OPAL_START_CPU call, which
 *      is at generic_secondary_smp_init, with PIR in r3.
 *
 *  For Book3E processors:
 *   1. The MMU is on, running in AS0, in a state defined in ePAPR.
 *   2. The kernel is entered at __start.
 */

OPEN_FIXED_SECTION(first_256B, 0x0, 0x100)
USE_FIXED_SECTION(first_256B)
	/*
	 * Offsets are relative to the start of the fixed section, and
	 * first_256B starts at 0. Offsets are a bit easier to use here
	 * than the fixed section entry macros.
	 */
	. = 0x0
_GLOBAL(__start)
	/* NOP this out unconditionally */
BEGIN_FTR_SECTION
	FIXUP_ENDIAN
	b	__start_initialization_multiplatform
END_FTR_SECTION(0, 1)

	/* Catch branch to 0 in real mode */
	trap

	/* Secondary processors spin on this value until it becomes non-zero.
	 * When non-zero, it contains the real address of the function the cpu
	 * should jump to.
	 */
	.balign	8
	.globl	__secondary_hold_spinloop
__secondary_hold_spinloop:
	.8byte	0x0

	/* Secondary processors write this value with their cpu # */
	/* after they enter the spin loop immediately below.      */
	.globl	__secondary_hold_acknowledge
__secondary_hold_acknowledge:
	.8byte	0x0

#ifdef CONFIG_RELOCATABLE
	/* This flag is set to 1 by a loader if the kernel should run
	 * at the loaded address instead of the linked address.  This
	 * is used by kexec-tools to keep the kdump kernel in the
	 * crash_kernel region.  The loader is responsible for
	 * observing the alignment requirement.
	 */

#ifdef CONFIG_RELOCATABLE_TEST
#define RUN_AT_LOAD_DEFAULT 1		/* Test relocation, do not copy to 0 */
#else
#define RUN_AT_LOAD_DEFAULT 0x72756e30	/* "run0" -- relocate to 0 by default */
#endif

	/* Do not move this variable as kexec-tools knows about it. */
	. = 0x5c
	.globl	__run_at_load
__run_at_load:
DEFINE_FIXED_SYMBOL(__run_at_load)
	.long	RUN_AT_LOAD_DEFAULT
#endif

	. = 0x60
/*
 * The following code is used to hold secondary processors
 * in a spin loop after they have entered the kernel, but
 * before the bulk of the kernel has been relocated.  This code
 * is relocated to physical address 0x60 before prom_init is run.
 * All of it must fit below the first exception vector at 0x100.
 * Use .globl here not _GLOBAL because we want __secondary_hold
 * to be the actual text address, not a descriptor.
 */
	.globl	__secondary_hold
__secondary_hold:
	FIXUP_ENDIAN
#ifndef CONFIG_PPC_BOOK3E
	mfmsr	r24
	ori	r24,r24,MSR_RI
	mtmsrd	r24			/* RI on */
#endif
	/* Grab our physical cpu number */
	mr	r24,r3
	/* stash r4 for book3e */
	mr	r25,r4

	/* Tell the master cpu we're here */
	/* Relocation is off & we are located at an address less */
	/* than 0x100, so only need to grab low order offset.    */
	std	r24,(ABS_ADDR(__secondary_hold_acknowledge))(0)
	sync

	li	r26,0
#ifdef CONFIG_PPC_BOOK3E
	tovirt(r26,r26)
#endif
	/* All secondary cpus wait here until told to start. */
100:	ld	r12,(ABS_ADDR(__secondary_hold_spinloop))(r26)
	cmpdi	0,r12,0
	beq	100b
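	/*
	 * Illustrative C sketch of the hold protocol above (not part of
	 * the build): the secondary stores its PIR to
	 * __secondary_hold_acknowledge, then does
	 *
	 *	while (!(addr = __secondary_hold_spinloop))
	 *		;
	 *	((void (*)(unsigned long))addr)(pir);
	 *
	 * once the boot CPU publishes the release address.
	 */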

#if defined(CONFIG_SMP) || defined(CONFIG_KEXEC_CORE)
#ifdef CONFIG_PPC_BOOK3E
	tovirt(r12,r12)
#endif
	mtctr	r12
	mr	r3,r24
	/*
	 * It may be that other platforms already have r4 right to begin
	 * with; this gives us some safety in case they do not.
	 */
#ifdef CONFIG_PPC_BOOK3E
	mr	r4,r25
#else
	li	r4,0
#endif
	/* Make sure that patched code is visible */
	isync
	bctr
#else
0:	trap
	EMIT_BUG_ENTRY 0b, __FILE__, __LINE__, 0
#endif
CLOSE_FIXED_SECTION(first_256B)

/* This value is used to mark exception frames on the stack. */
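/* For reference: the 64-bit marker value below is the ASCII string "regshere". */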
	.section ".toc","aw"
exception_marker:
	.tc	ID_72656773_68657265[TC],0x7265677368657265
	.previous

/*
 * On server, we include the exception vectors code here as it
 * relies on absolute addressing which is only possible within
 * this compilation unit.
 */
#ifdef CONFIG_PPC_BOOK3S
#include "exceptions-64s.S"
#else
OPEN_TEXT_SECTION(0x100)
#endif

USE_TEXT_SECTION()

#ifdef CONFIG_PPC_BOOK3E
/*
 * The booting_thread_hwid holds the thread id we want to boot in the cpu
 * hotplug case. It is set by cpu hotplug code, and is invalid by default.
 * The thread id is the same as the initial value of SPRN_PIR[THREAD_ID]
 * bit field.
 */
	.globl	booting_thread_hwid
booting_thread_hwid:
	.long	INVALID_THREAD_HWID
	.align	3
/*
 * start a thread in the same core
 * input parameters:
 * r3 = the thread physical id
 * r4 = the entry point where thread starts
 */
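/*
 * Sketch of the mechanism, for reference (as the register names used
 * below suggest): IMSRn/INIAn hold the initial MSR and instruction
 * address for thread n, and setting the thread's bit in TENS (thread
 * enable set) starts it running from that state.
 */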
_GLOBAL(book3e_start_thread)
	LOAD_REG_IMMEDIATE(r5, MSR_KERNEL)
	cmpwi	r3, 0
	beq	10f
	cmpwi	r3, 1
	beq	11f
	/* If the thread id is invalid, just exit. */
	b	13f
10:
	MTTMR(TMRN_IMSR0, 5)
	MTTMR(TMRN_INIA0, 4)
	b	12f
11:
	MTTMR(TMRN_IMSR1, 5)
	MTTMR(TMRN_INIA1, 4)
12:
	isync
	li	r6, 1
	sld	r6, r6, r3
	mtspr	SPRN_TENS, r6
13:
	blr

/*
 * stop a thread in the same core
 * input parameter:
 * r3 = the thread physical id
 */
_GLOBAL(book3e_stop_thread)
	cmpwi	r3, 0
	beq	10f
	cmpwi	r3, 1
	beq	10f
	/* If the thread id is invalid, just exit. */
	b	13f
10:
	li	r4, 1
	sld	r4, r4, r3
	mtspr	SPRN_TENC, r4
13:
	blr

_GLOBAL(fsl_secondary_thread_init)
	mfspr	r4,SPRN_BUCSR

	/* Enable branch prediction */
	lis	r3,BUCSR_INIT@h
	ori	r3,r3,BUCSR_INIT@l
	mtspr	SPRN_BUCSR,r3
	isync

	/*
	 * Fix PIR to match the linear numbering in the device tree.
	 *
	 * On e6500, the reset value of PIR uses the low three bits for
	 * the thread within a core, and the upper bits for the core
	 * number.  There are two threads per core, so shift everything
	 * but the low bit right by two bits so that the cpu numbering is
	 * continuous.
	 *
	 * If the old value of BUCSR is non-zero, this thread has run
	 * before.  Thus, we assume we are coming from kexec or a similar
	 * scenario, and PIR is already set to the correct value.  This
	 * is a bit of a hack, but there are limited opportunities for
	 * getting information into the thread and the alternatives
	 * seemed like they'd be overkill.  We can't tell just by looking
	 * at the old PIR value which state it's in, since the same value
	 * could be valid for one thread out of reset and for a different
	 * thread in Linux.
	 */
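	/*
	 * Worked example (illustrative only): a reset PIR of 0b10001
	 * (core 2, thread 1) becomes 0b101, i.e. cpu 5 = core * 2 + thread.
	 */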

	mfspr	r3, SPRN_PIR
	cmpwi	r4,0
	bne	1f
	rlwimi	r3, r3, 30, 2, 30
	mtspr	SPRN_PIR, r3
1:
	mr	r24,r3

	/* turn on 64-bit mode */
	bl	enable_64b_mode

	/* get a valid TOC pointer, wherever we're mapped at */
	bl	relative_toc
	tovirt(r2,r2)

	/* Book3E initialization */
	mr	r3,r24
	bl	book3e_secondary_thread_init
	b	generic_secondary_common_init

#endif /* CONFIG_PPC_BOOK3E */

/*
 * On pSeries and most other platforms, secondary processors spin
 * in the following code.
 * At entry, r3 = this processor's number (physical cpu id)
 *
 * On Book3E, r4 = 1 to indicate that the initial TLB entry for
 * this core already exists (setup via some other mechanism such
 * as SCOM before entry).
 */
_GLOBAL(generic_secondary_smp_init)
	FIXUP_ENDIAN
	mr	r24,r3
	mr	r25,r4

	/* turn on 64-bit mode */
	bl	enable_64b_mode

	/* get a valid TOC pointer, wherever we're mapped at */
	bl	relative_toc
	tovirt(r2,r2)

#ifdef CONFIG_PPC_BOOK3E
	/* Book3E initialization */
	mr	r3,r24
	mr	r4,r25
	bl	book3e_secondary_core_init

/*
 * After common core init has finished, check if the current thread is the
 * one we wanted to boot. If not, start the specified thread and stop the
 * current thread.
 */
	LOAD_REG_ADDR(r4, booting_thread_hwid)
	lwz	r3, 0(r4)
	li	r5, INVALID_THREAD_HWID
	cmpw	r3, r5
	beq	20f

	/*
	 * The value of booting_thread_hwid has been saved in r3, so
	 * invalidate the variable now.
	 */
	stw	r5, 0(r4)

	/*
	 * Get the current thread id and check if it is the one we wanted.
	 * If not, start the one specified in booting_thread_hwid and stop
	 * the current thread.
	 */
	mfspr	r8, SPRN_TIR
	cmpw	r3, r8
	beq	20f

	/* start the specified thread */
	LOAD_REG_ADDR(r5, fsl_secondary_thread_init)
	ld	r4, 0(r5)
	bl	book3e_start_thread

	/* stop the current thread */
	mr	r3, r8
	bl	book3e_stop_thread
10:
	b	10b
20:
#endif

generic_secondary_common_init:
	/* Set up a paca value for this processor.  Since we have the
	 * physical cpu id in r24, we need to search the pacas to find
	 * which logical id maps to our physical one.
	 */
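	/*
	 * Roughly, in C (illustrative sketch only; our_hw_id is the
	 * physical id we were entered with, i.e. r24):
	 *
	 *	for (cpu = 0; cpu < nr_cpu_ids; cpu++)
	 *		if (paca_ptrs[cpu]->hw_cpu_id == our_hw_id)
	 *			break;
	 */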
#ifndef CONFIG_SMP
	b	kexec_wait		/* wait for next kernel if !SMP	 */
#else
	LOAD_REG_ADDR(r8, paca_ptrs)	/* Load paca_ptrs pointer	 */
	ld	r8,0(r8)		/* Get base vaddr of array	 */
	LOAD_REG_ADDR(r7, nr_cpu_ids)	/* Load nr_cpu_ids address	 */
	lwz	r7,0(r7)		/* also the max paca allocated	 */
	li	r5,0			/* logical cpu id		 */
1:
	sldi	r9,r5,3		/* get paca_ptrs[] index from cpu id */
	ldx	r13,r9,r8	/* r13 = paca_ptrs[cpu id] */
	lhz	r6,PACAHWCPUID(r13)	/* Load HW procid from paca */
	cmpw	r6,r24			/* Compare to our id */
	beq	2f
	addi	r5,r5,1
	cmpw	r5,r7			/* Check if more pacas exist */
	blt	1b

	mr	r3,r24			/* not found, copy phys to r3 */
	b	kexec_wait		/* next kernel might do better */

2:	SET_PACA(r13)
#ifdef CONFIG_PPC_BOOK3E
	addi	r12,r13,PACA_EXTLB	/* and TLB exc frame in another */
	mtspr	SPRN_SPRG_TLB_EXFRAME,r12
#endif

	/* From now on, r24 is expected to be logical cpuid */
	mr	r24,r5

	/* Create a temp kernel stack for use before relocation is on. */
	ld	r1,PACAEMERGSP(r13)
	subi	r1,r1,STACK_FRAME_OVERHEAD

	/* See if we need to call a cpu state restore handler */
	LOAD_REG_ADDR(r23, cur_cpu_spec)
	ld	r23,0(r23)
	ld	r12,CPU_SPEC_RESTORE(r23)
	cmpdi	0,r12,0
	beq	3f
#ifdef PPC64_ELF_ABI_v1
	ld	r12,0(r12)
#endif
	mtctr	r12
	bctrl

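	/*
	 * The lwarx/stwcx. loop below is an atomic decrement of
	 * spinning_secondaries, retried until the store-conditional
	 * succeeds (roughly an atomic "spinning_secondaries--";
	 * illustrative comparison only).
	 */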
3:	LOAD_REG_ADDR(r3, spinning_secondaries)	/* Decrement spinning_secondaries */
	lwarx	r4,0,r3
	subi	r4,r4,1
	stwcx.	r4,0,r3
	bne	3b
	isync

4:	HMT_LOW
	lbz	r23,PACAPROCSTART(r13)	/* Test if this processor should */
					/* start.			 */
	cmpwi	0,r23,0
	beq	4b			/* Loop until told to go	 */
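	/*
	 * In effect (illustrative sketch): while (!paca->cpu_start) spin
	 * at low SMT priority; the boot CPU sets this field when it wants
	 * this secondary to proceed.
	 */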

	sync				/* order paca.run and cur_cpu_spec */
	isync				/* In case code patching happened */

	b	__secondary_start
#endif /* SMP */

/*
 * Turn the MMU off.
 * Assumes we're mapped EA == RA if the MMU is on.
 */
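/*
 * Note: the rfid below returns to the caller (LR) with MSR_IR/MSR_DR
 * cleared in SRR1, so address translation is switched off at the same
 * moment control returns.
 */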
#ifdef CONFIG_PPC_BOOK3S
__mmu_off:
	mfmsr	r3
	andi.	r0,r3,MSR_IR|MSR_DR
	beqlr
	mflr	r4
	andc	r3,r3,r0
	mtspr	SPRN_SRR0,r4
	mtspr	SPRN_SRR1,r3
	sync
	rfid
	b	.	/* prevent speculative execution */
#endif


/*
 * Here is our main kernel entry point. We currently support two kinds
 * of entry, depending on the value of r5.
 *
 *   r5 != NULL -> OF entry, we go to prom_init, "legacy" parameter content
 *                 in r3...r7
 *
 *   r5 == NULL -> kexec style entry. r3 is a physical pointer to the
 *                 DT block, r4 is a physical pointer to the kernel itself
 *
 */
__start_initialization_multiplatform:
	/* Make sure we are running in 64-bit mode */
	bl	enable_64b_mode

	/* Get TOC pointer (current runtime address) */
	bl	relative_toc

	/* find out where we are now */
	bcl	20,31,$+4
0:	mflr	r26			/* r26 = runtime addr here */
	addis	r26,r26,(_stext - 0b)@ha
	addi	r26,r26,(_stext - 0b)@l	/* current runtime base addr */
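	/*
	 * The bcl 20,31,$+4 / mflr pair above reads the address of the
	 * following instruction (label 0) into LR, from which the runtime
	 * address of _stext is computed using the link-time offset
	 * (_stext - 0b). This particular bcl form is conventionally used
	 * for such position discovery because it is understood not to be
	 * treated as a real subroutine call by the branch predictor.
	 */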

	/*
	 * Are we booted from a PROM OF-type client interface?
	 */
	cmpldi	cr0,r5,0
	beq	1f
	b	__boot_from_prom		/* yes -> prom */
1:
	/* Save parameters */
	mr	r31,r3
	mr	r30,r4
#ifdef CONFIG_PPC_EARLY_DEBUG_OPAL
	/* Save OPAL entry */
	mr	r28,r8
	mr	r29,r9
#endif

#ifdef CONFIG_PPC_BOOK3E
	bl	start_initialization_book3e
	b	__after_prom_start
#else
	/* Setup some critical 970 SPRs before switching MMU off */
	mfspr	r0,SPRN_PVR
	srwi	r0,r0,16
	cmpwi	r0,0x39			/* 970 */
	beq	1f
	cmpwi	r0,0x3c			/* 970FX */
	beq	1f
	cmpwi	r0,0x44			/* 970MP */
	beq	1f
	cmpwi	r0,0x45			/* 970GX */
	bne	2f
1:	bl	__cpu_preinit_ppc970
2:

	/* Switch off MMU if not already off */
	bl	__mmu_off
	b	__after_prom_start
#endif /* CONFIG_PPC_BOOK3E */

__REF
__boot_from_prom:
#ifdef CONFIG_PPC_OF_BOOT_TRAMPOLINE
	/* Save parameters */
	mr	r31,r3
	mr	r30,r4
	mr	r29,r5
	mr	r28,r6
	mr	r27,r7

	/*
	 * Align the stack to a 16-byte boundary.  Depending on the size
	 * and layout of the ELF sections in the initial boot binary, the
	 * stack pointer may be unaligned on PowerMac.
	 */
	rldicr	r1,r1,0,59
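	/* rldicr with mask end 59 clears the low four bits, i.e. it rounds
	 * r1 down to a 16-byte boundary. */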

#ifdef CONFIG_RELOCATABLE
	/* Relocate code for where we are now */
	mr	r3,r26
	bl	relocate
#endif

	/* Restore parameters */
	mr	r3,r31
	mr	r4,r30
	mr	r5,r29
	mr	r6,r28
	mr	r7,r27

	/* Do all of the interaction with OF client interface */
	mr	r8,r26
	bl	prom_init
#endif /* #CONFIG_PPC_OF_BOOT_TRAMPOLINE */

	/* We never return. We also hit that trap if trying to boot
	 * from OF while CONFIG_PPC_OF_BOOT_TRAMPOLINE isn't selected */
	trap
	.previous

__after_prom_start:
#ifdef CONFIG_RELOCATABLE
	/* process relocations for the final address of the kernel */
	lis	r25,PAGE_OFFSET@highest	/* compute virtual base of kernel */
	sldi	r25,r25,32
#if defined(CONFIG_PPC_BOOK3E)
	tovirt(r26,r26)		/* on booke, we already run at PAGE_OFFSET */
#endif
	lwz	r7,(FIXED_SYMBOL_ABS_ADDR(__run_at_load))(r26)
#if defined(CONFIG_PPC_BOOK3E)
	tophys(r26,r26)
#endif
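	/*
	 * If __run_at_load is 1 we stay at the load address, so relocate
	 * for PAGE_OFFSET plus our current offset (r26); otherwise
	 * relocate for PAGE_OFFSET itself and copy the image down later.
	 */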
	cmplwi	cr0,r7,1	/* flagged to stay where we are ? */
	bne	1f
	add	r25,r25,r26
1:	mr	r3,r25
	bl	relocate
#if defined(CONFIG_PPC_BOOK3E)
	/* IVPR needs to be set after relocation. */
	bl	init_core_book3e
#endif
#endif

/*
 * We need to run with _stext at physical address PHYSICAL_START.
 * This will leave some code in the first 256B of
 * real memory, which is reserved for software use.
 *
 * Note: This process overwrites the OF exception vectors.
 */
	li	r3,0			/* target addr */
#ifdef CONFIG_PPC_BOOK3E
	tovirt(r3,r3)		/* on booke, we already run at PAGE_OFFSET */
#endif
	mr.	r4,r26			/* In some cases the loader may  */
#if defined(CONFIG_PPC_BOOK3E)
	tovirt(r4,r4)
#endif
	beq	9f			/* have already put us at zero */
	li	r6,0x100		/* Start offset, the first 0x100 */
					/* bytes were copied earlier.	 */

#ifdef CONFIG_RELOCATABLE
/*
 * Check if the kernel has to run as a relocatable kernel, based on the
 * variable __run_at_load: if it is set, the kernel stays at the address
 * it was loaded at; otherwise it will be moved to PHYSICAL_START.
 */
#if defined(CONFIG_PPC_BOOK3E)
	tovirt(r26,r26)		/* on booke, we already run at PAGE_OFFSET */
#endif
	lwz	r7,(FIXED_SYMBOL_ABS_ADDR(__run_at_load))(r26)
	cmplwi	cr0,r7,1
	bne	3f

#ifdef CONFIG_PPC_BOOK3E
	LOAD_REG_ADDR(r5, __end_interrupts)
	LOAD_REG_ADDR(r11, _stext)
	sub	r5,r5,r11
#else
	/* just copy interrupts */
	LOAD_REG_IMMEDIATE_SYM(r5, r11, FIXED_SYMBOL_ABS_ADDR(__end_interrupts))
#endif
	b	5f
3:
#endif
	/* # bytes of memory to copy */
	lis	r5,(ABS_ADDR(copy_to_here))@ha
	addi	r5,r5,(ABS_ADDR(copy_to_here))@l

	bl	copy_and_flush		/* copy the first n bytes	 */
					/* this includes the code being	 */
					/* executed here.		 */
	/* Jump to the copy of this code that we just made */
	addis	r8,r3,(ABS_ADDR(4f))@ha
	addi	r12,r8,(ABS_ADDR(4f))@l
	mtctr	r12
	bctr

	.balign	8
p_end:	.8byte	_end - copy_to_here

4:
	/*
	 * Now copy the rest of the kernel up to _end, add
	 * _end - copy_to_here to the copy limit and run again.
	 */
	addis	r8,r26,(ABS_ADDR(p_end))@ha
	ld	r8,(ABS_ADDR(p_end))@l(r8)
	add	r5,r5,r8
5:	bl	copy_and_flush		/* copy the rest */

9:	b	start_here_multiplatform

/*
 * Copy routine used to copy the kernel to start at physical address 0
 * and flush and invalidate the caches as needed.
 * r3 = dest addr, r4 = source addr, r5 = copy limit, r6 = start offset
 * on exit, r3, r4, r5 are unchanged, r6 is updated to be >= r5.
 *
 * Note: this routine *only* clobbers r0, r6 and lr
 */
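/*
 * Roughly, in C (illustrative sketch only):
 *
 *	while (r6 < r5) {
 *		copy 8 doublewords (64 bytes) from r4 + r6 to r3 + r6;
 *		dcbst/icbi the destination so the icache sees the copy;
 *	}
 */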
_GLOBAL(copy_and_flush)
	addi	r5,r5,-8
	addi	r6,r6,-8
4:	li	r0,8			/* Use the smallest common	*/
					/* denominator cache line	*/
					/* size.  This results in	*/
					/* extra cache line flushes	*/
					/* but operation is correct.	*/
					/* Can't get cache line size	*/
					/* from NACA as it is being	*/
					/* moved too.			*/

	mtctr	r0			/* put # words/line in ctr	*/
3:	addi	r6,r6,8			/* copy a cache line		*/
	ldx	r0,r6,r4
	stdx	r0,r6,r3
	bdnz	3b
	dcbst	r6,r3			/* write it to memory		*/
	sync
	icbi	r6,r3			/* flush the icache line	*/
	cmpld	0,r6,r5
	blt	4b
	sync
	addi	r5,r5,8
	addi	r6,r6,8
	isync
	blr

	.align	8
copy_to_here:

#ifdef CONFIG_SMP
#ifdef CONFIG_PPC_PMAC
/*
 * On PowerMac, secondary processors start from the reset vector, which
 * is temporarily turned into a call to one of the functions below.
 */
	.section ".text";
	.align 2 ;

	.globl	__secondary_start_pmac_0
__secondary_start_pmac_0:
	/* NB the entries for cpus 0, 1, 2 must each occupy 8 bytes. */
	li	r24,0
	b	1f
	li	r24,1
	b	1f
	li	r24,2
	b	1f
	li	r24,3
1:

_GLOBAL(pmac_secondary_start)
	/* turn on 64-bit mode */
	bl	enable_64b_mode

	li	r0,0
	mfspr	r3,SPRN_HID4
	rldimi	r3,r0,40,23	/* clear bit 23 (rm_ci) */
	sync
	mtspr	SPRN_HID4,r3
	isync
	sync
	slbia

	/* get TOC pointer (real address) */
	bl	relative_toc
	tovirt(r2,r2)

	/* Copy some CPU settings from CPU 0 */
	bl	__restore_cpu_ppc970

	/* pSeries does this early, though I don't think we really need it */
	mfmsr	r3
	ori	r3,r3,MSR_RI
	mtmsrd	r3			/* RI on */

	/* Set up a paca value for this processor. */
	LOAD_REG_ADDR(r4,paca_ptrs)	/* Load paca pointer		*/
	ld	r4,0(r4)		/* Get base vaddr of paca_ptrs array */
	sldi	r5,r24,3		/* get paca_ptrs[] index from cpu id */
	ldx	r13,r5,r4		/* r13 = paca_ptrs[cpu id] */
	SET_PACA(r13)			/* Save vaddr of paca in an SPRG */

	/* Mark interrupts soft and hard disabled (they might be enabled
	 * in the PACA when doing hotplug)
	 */
	li	r0,IRQS_DISABLED
	stb	r0,PACAIRQSOFTMASK(r13)
	li	r0,PACA_IRQ_HARD_DIS
	stb	r0,PACAIRQHAPPENED(r13)

	/* Create a temp kernel stack for use before relocation is on. */
	ld	r1,PACAEMERGSP(r13)
	subi	r1,r1,STACK_FRAME_OVERHEAD

	b	__secondary_start

#endif /* CONFIG_PPC_PMAC */

/*
 * This function is called after the master CPU has released the
 * secondary processors.  The execution environment is relocation off.
 * The paca for this processor has the following fields initialized at
 * this point:
 *   1. Processor number
 *   2. Segment table pointer (virtual address)
 * On entry the following are set:
 *   r1        = stack pointer (real addr of temp stack)
 *   r24       = cpu# (in Linux terms)
 *   r13       = paca virtual address
 *   SPRG_PACA = paca virtual address
 */
	.section ".text";
	.align 2 ;

	.globl	__secondary_start
__secondary_start:
	/* Set thread priority to MEDIUM */
	HMT_MEDIUM

	/*
	 * Do early setup for this CPU, in particular initialising the MMU so we
	 * can turn it on below. This is a call to C, which is OK, we're still
	 * running on the emergency stack.
	 */
	bl	early_setup_secondary

^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 807) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 808) * The primary has initialized our kernel stack for us in the paca, grab
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 809) * it and put it in r1. We must *not* use it until we turn on the MMU
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 810) * below, because it may not be inside the RMO.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 811) */
	ld	r1, PACAKSAVE(r13)

	/* Clear backchain so we get nice backtraces */
	li	r7,0
	mtlr	r7

	/* Mark interrupts soft and hard disabled (they might be enabled
	 * in the PACA when doing hotplug)
	 */
	li	r7,IRQS_DISABLED
	stb	r7,PACAIRQSOFTMASK(r13)
	li	r0,PACA_IRQ_HARD_DIS
	stb	r0,PACAIRQHAPPENED(r13)

	/* enable MMU and jump to start_secondary */
	LOAD_REG_ADDR(r3, start_secondary_prolog)
	LOAD_REG_IMMEDIATE(r4, MSR_KERNEL)

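	/*
	 * SRR0/SRR1 hold the target PC and MSR; the RFI below loads both
	 * atomically, so we land in start_secondary_prolog with MSR_KERNEL
	 * in effect, i.e. with relocation turned on.
	 */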
	mtspr	SPRN_SRR0,r3
	mtspr	SPRN_SRR1,r4
	RFI
	b	.	/* prevent speculative execution */

/*
 * Running with relocation on at this point.  All we want to do is
 * zero the stack back-chain pointer and get the TOC virtual address
 * before going into C code.
 */
start_secondary_prolog:
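	/* Get the kernel TOC (virtual address) from the PACA */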
	ld	r2,PACATOC(r13)
	li	r3,0
	std	r3,0(r1)		/* Zero the stack frame pointer	*/
	bl	start_secondary
	b	.
/*
 * Reset the stack pointer and call start_secondary to resume normal
 * online operation when a CPU that went offline via cede is woken
 * back up.
 */
_GLOBAL(start_secondary_resume)
	ld	r1,PACAKSAVE(r13)	/* Reload kernel stack pointer */
	li	r3,0
	std	r3,0(r1)		/* Zero the stack frame pointer	*/
	bl	start_secondary
	b	.
#endif

/*
 * This subroutine clobbers r11 and r12
 */
enable_64b_mode:
	mfmsr	r11			/* grab the current MSR */
#ifdef CONFIG_PPC_BOOK3E
	oris	r11,r11,0x8000		/* CM bit set, we'll set ICM later */
	mtmsr	r11
#else /* CONFIG_PPC_BOOK3E */
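	/*
	 * MSR_64BIT and MSR_ISF live in the top 16 bits of the MSR, so
	 * build the mask with @highest and shift it back up before OR-ing
	 * it into the current MSR value.
	 */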
	li	r12,(MSR_64BIT | MSR_ISF)@highest
	sldi	r12,r12,48
	or	r11,r11,r12
	mtmsrd	r11
	isync
#endif
	blr

/*
 * This puts the TOC pointer into r2, offset by 0x8000 (as expected
 * by the toolchain).  It computes the correct value for wherever we
 * are running at the moment, using position-independent code.
 *
 * Note: The compiler constructs pointers using offsets from the
 * TOC in -mcmodel=medium mode.  After we relocate to 0 but before
 * the MMU is on we need our TOC to be a virtual address otherwise
 * these pointers will be real addresses which may get stored and
 * accessed later with the MMU on.  We use tovirt() at the call
 * sites to handle this.
 */
_GLOBAL(relative_toc)
	mflr	r0
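	/*
	 * "bcl 20,31,$+4" is the preferred way to get the current address
	 * into LR: this form is recognised specially and does not unbalance
	 * the processor's link stack branch predictor.
	 */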
	bcl	20,31,$+4
0:	mflr	r11
	ld	r2,(p_toc - 0b)(r11)
	add	r2,r2,r11
	mtlr	r0
	blr

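/*
 * p_toc is a link-time constant: the offset from the 0: label above to
 * __toc_start + 0x8000.  Adding the runtime address of 0: (in r11)
 * therefore yields the runtime TOC pointer.
 */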
.balign 8
p_toc:	.8byte	__toc_start + 0x8000 - 0b

/*
 * This is where the main kernel code starts.
 */
__REF
start_here_multiplatform:
	/* set up the TOC */
	bl	relative_toc
	tovirt(r2,r2)

	/* Clear out the BSS.  It may have been done in prom_init already,
	 * but that's irrelevant since prom_init will soon be detached from
	 * the kernel completely.  Besides, we need to clear it now for
	 * kexec-style entry.
	 */
	LOAD_REG_ADDR(r11,__bss_stop)
	LOAD_REG_ADDR(r8,__bss_start)
	sub	r11,r11,r8		/* bss size			*/
	addi	r11,r11,7		/* round up to a whole number of doublewords */
	srdi.	r11,r11,3		/* shift right by 3		*/
	beq	4f
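	/*
	 * stdu pre-increments r8 by 8 before each store, so start one
	 * doubleword below __bss_start.
	 */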
	addi	r8,r8,-8
	li	r0,0
	mtctr	r11			/* zero this many doublewords */
3:	stdu	r0,8(r8)
	bdnz	3b
4:

#ifdef CONFIG_PPC_EARLY_DEBUG_OPAL
	/* Setup OPAL entry */
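	/*
	 * r28/r29 are assumed to have been loaded earlier in the boot path
	 * with the OPAL base and entry address handed to us by the
	 * firmware; stash them in "opal" for the early debug console.
	 */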
	LOAD_REG_ADDR(r11, opal)
	std	r28,0(r11);
	std	r29,8(r11);
#endif

#ifndef CONFIG_PPC_BOOK3E
	mfmsr	r6
	ori	r6,r6,MSR_RI
	mtmsrd	r6			/* RI on */
#endif

#ifdef CONFIG_RELOCATABLE
	/* Save the physical address we're running at in kernstart_addr */
	LOAD_REG_ADDR(r4, kernstart_addr)
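	/*
	 * r25 is assumed to have been set up earlier with the address we
	 * are running at; clearing the top two bits strips the linear
	 * mapping quadrant so only the physical address gets stored.
	 */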
	clrldi	r0,r25,2
	std	r0,0(r4)
#endif

	/* set up a stack pointer */
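	/*
	 * Point r1 at the top of the init task's stack (init_thread_union)
	 * and push an initial frame with a zero back-chain so stack walks
	 * terminate here.
	 */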
	LOAD_REG_ADDR(r3,init_thread_union)
	LOAD_REG_IMMEDIATE(r1,THREAD_SIZE)
	add	r1,r3,r1
	li	r0,0
	stdu	r0,-STACK_FRAME_OVERHEAD(r1)

	/*
	 * Do very early kernel initializations, including initial hash table
	 * and SLB setup before we turn on relocation.
	 */

	/* Restore parameters passed from prom_init/kexec */
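	/*
	 * r31 is assumed to hold the flattened device-tree pointer saved at
	 * kernel entry; early_setup() takes it as its argument in r3.
	 */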
	mr	r3,r31
	LOAD_REG_ADDR(r12, DOTSYM(early_setup))
	mtctr	r12
	bctrl		/* also sets r13 and SPRG_PACA */

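	/*
	 * Switch to virtual mode: the RFI below takes us to
	 * start_here_common with the kernel MSR from the PACA, which has
	 * relocation enabled.
	 */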
	LOAD_REG_ADDR(r3, start_here_common)
	ld	r4,PACAKMSR(r13)
	mtspr	SPRN_SRR0,r3
	mtspr	SPRN_SRR1,r4
	RFI
	b	.	/* prevent speculative execution */

	/* This is where all platforms converge execution */

start_here_common:
	/* relocation is on at this point */
	std	r1,PACAKSAVE(r13)

	/* Load the TOC (virtual address) */
	ld	r2,PACATOC(r13)

	/* Mark interrupts soft and hard disabled (they might be enabled
	 * in the PACA when doing hotplug)
	 */
	li	r0,IRQS_DISABLED
	stb	r0,PACAIRQSOFTMASK(r13)
	li	r0,PACA_IRQ_HARD_DIS
	stb	r0,PACAIRQHAPPENED(r13)

	/* Generic kernel entry */
	bl	start_kernel

	/* Not reached */
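	/*
	 * If start_kernel somehow returns, trap here; EMIT_BUG_ENTRY
	 * records this address in the bug table so the trap is reported
	 * as a BUG.
	 */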
0:	trap
	EMIT_BUG_ENTRY 0b, __FILE__, __LINE__, 0
	.previous

/*
 * We put a few things here that have to be page-aligned.
 * They go at the beginning of the bss, which is itself page-aligned.
 */
	.section ".bss"
/*
 * The pgd dir should be aligned to PGD_TABLE_SIZE, which is 64K.
 * We will need to find a better way to handle this.
 */
	.align	16
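/*
 * Note: .align takes a power of two here, so ".align 16" gives
 * 2^16 = 64K alignment, matching PGD_TABLE_SIZE above.
 */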

	.globl	swapper_pg_dir
swapper_pg_dir:
	.space	PGD_TABLE_SIZE

	.globl	empty_zero_page
empty_zero_page:
	.space	PAGE_SIZE
EXPORT_SYMBOL(empty_zero_page)