/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
 * Kernel execution entry point code.
 *
 *    Copyright (c) 1995-1996 Gary Thomas <gdt@linuxppc.org>
 *      Initial PowerPC version.
 *    Copyright (c) 1996 Cort Dougan <cort@cs.nmt.edu>
 *      Rewritten for PReP
 *    Copyright (c) 1996 Paul Mackerras <paulus@cs.anu.edu.au>
 *      Low-level exception handlers, MMU support, and rewrite.
 *    Copyright (c) 1997 Dan Malek <dmalek@jlc.net>
 *      PowerPC 8xx modifications.
 *    Copyright (c) 1998-1999 TiVo, Inc.
 *      PowerPC 403GCX modifications.
 *    Copyright (c) 1999 Grant Erickson <grant@lcse.umn.edu>
 *      PowerPC 403GCX/405GP modifications.
 *    Copyright 2000 MontaVista Software Inc.
 *      PPC405 modifications
 *      PowerPC 403GCX/405GP modifications.
 *      Author: MontaVista Software, Inc.
 *              frank_rowand@mvista.com or source@mvista.com
 *              debbie_chu@mvista.com
 *    Copyright 2002-2005 MontaVista Software, Inc.
 *      PowerPC 44x support, Matt Porter <mporter@kernel.crashing.org>
 */

#include <linux/init.h>
#include <linux/pgtable.h>
#include <asm/processor.h>
#include <asm/page.h>
#include <asm/mmu.h>
#include <asm/cputable.h>
#include <asm/thread_info.h>
#include <asm/ppc_asm.h>
#include <asm/asm-offsets.h>
#include <asm/ptrace.h>
#include <asm/synch.h>
#include <asm/export.h>
#include <asm/code-patching-asm.h>
#include "head_booke.h"


/* As with the other PowerPC ports, it is expected that when code
 * execution begins here, the following registers contain valid, yet
 * optional, information:
 *
 *   r3 - Board info structure pointer (DRAM, frequency, MAC address, etc.)
 *   r4 - Starting address of the init RAM disk
 *   r5 - Ending address of the init RAM disk
 *   r6 - Start of kernel command line string (e.g. "mem=128")
 *   r7 - End of kernel command line string
 *
 */
        __HEAD
_ENTRY(_stext);
_ENTRY(_start);
        /*
         * Reserve a word at a fixed location to store the address
         * of abatron_pteptrs
         */
        nop
        mr      r31,r3          /* save device tree ptr */
        li      r24,0           /* CPU number */

#ifdef CONFIG_RELOCATABLE
        /*
         * Relocate ourselves to the current runtime address.
         * This is called only by the Boot CPU.
         * "relocate" is called with our current runtime virtual
         * address.
         * r21 will be loaded with the physical runtime address of _stext
         */
        bl      0f                              /* Get our runtime address */
0:      mflr    r21                             /* Make it accessible */
        addis   r21,r21,(_stext - 0b)@ha
        addi    r21,r21,(_stext - 0b)@l         /* Get our current runtime base */

        /*
         * We have the runtime (virtual) address of our base.
         * We calculate our offset within the 256M page we belong to.
         * We could map the 256M page we belong to at PAGE_OFFSET and
         * get going from there.
         */
        lis     r4,KERNELBASE@h
        ori     r4,r4,KERNELBASE@l
        rlwinm  r6,r21,0,4,31           /* r6 = PHYS_START % 256M */
        rlwinm  r5,r4,0,4,31            /* r5 = KERNELBASE % 256M */
        subf    r3,r5,r6                /* r3 = r6 - r5 */
        add     r3,r4,r3                /* Required Virtual Address */
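        /*
         * Illustrative sketch only (not assembled): the code above amounts
         * to the following C, assuming 256M (0x10000000) boot-mapping pages:
         *
         *      phys_off = runtime_base & 0x0fffffff;    // r6
         *      virt_off = KERNELBASE   & 0x0fffffff;    // r5
         *      r3 = KERNELBASE + (phys_off - virt_off); // virtual address
         *                                               // handed to relocate()
         */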

        bl      relocate
#endif

        bl      init_cpu_state

        /*
         * This is where the main kernel code starts.
         */

        /* ptr to current */
        lis     r2,init_task@h
        ori     r2,r2,init_task@l

        /* ptr to current thread */
        addi    r4,r2,THREAD            /* init task's THREAD */
        mtspr   SPRN_SPRG_THREAD,r4

        /* stack */
        lis     r1,init_thread_union@h
        ori     r1,r1,init_thread_union@l
        li      r0,0
        stwu    r0,THREAD_SIZE-STACK_FRAME_OVERHEAD(r1)

        bl      early_init

#ifdef CONFIG_RELOCATABLE
        /*
         * Relocatable kernel support based on processing of dynamic
         * relocation entries.
         *
         * r25 will contain RPN/ERPN for the start address of memory
         * r21 will contain the current offset of _stext
         */
        lis     r3,kernstart_addr@ha
        la      r3,kernstart_addr@l(r3)

        /*
         * Compute the kernstart_addr.
         * kernstart_addr => (r6,r8)
         * kernstart_addr & ~0xfffffff => (r6,r7)
         */
        rlwinm  r6,r25,0,28,31  /* ERPN. Bits 32-35 of Address */
        rlwinm  r7,r25,0,0,3    /* RPN - assuming 256 MB page size */
        rlwinm  r8,r21,0,4,31   /* r8 = (_stext & 0xfffffff) */
        or      r8,r7,r8        /* Compute the lower 32bit of kernstart_addr */

        /* Store kernstart_addr */
        stw     r6,0(r3)        /* higher 32bit */
        stw     r8,4(r3)        /* lower 32bit */
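        /*
         * Informal sketch (not assembled): the 44x physical address is
         * 36 bits wide, so the value stored above is effectively
         *
         *      kernstart_addr = ((u64)ERPN << 32)
         *                       | (RPN & ~0xfffffff)
         *                       | (_stext_runtime & 0xfffffff);
         *
         * with ERPN/RPN taken from r25 and _stext_runtime from r21.
         */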

        /*
         * Compute the virt_phys_offset:
         * virt_phys_offset = stext.run - kernstart_addr
         *
         * stext.run = (KERNELBASE & ~0xfffffff) + (kernstart_addr & 0xfffffff)
         * When we relocate, we have:
         *
         *      (kernstart_addr & 0xfffffff) = (stext.run & 0xfffffff)
         *
         * hence:
         *  virt_phys_offset = (KERNELBASE & ~0xfffffff) - (kernstart_addr & ~0xfffffff)
         *
         */
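        /*
         * Worked example (illustrative only, assuming the usual KERNELBASE
         * of 0xc0000000): for a kernel loaded at physical 0x1_1000_0000
         * (ERPN = 1),
         *
         *      virt_phys_offset = 0x00000000c0000000 - 0x0000000110000000
         *                       = 0xffffffffb0000000
         *
         * which is what the 64-bit subtraction below leaves in (r4,r5).
         */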

        /* KERNELBASE&~0xfffffff => (r4,r5) */
        li      r4, 0           /* higher 32bit */
        lis     r5,KERNELBASE@h
        rlwinm  r5,r5,0,0,3     /* Align to 256M, lower 32bit */

        /*
         * 64bit subtraction.
         */
        subfc   r5,r7,r5
        subfe   r4,r6,r4

        /* Store virt_phys_offset */
        lis     r3,virt_phys_offset@ha
        la      r3,virt_phys_offset@l(r3)

        stw     r4,0(r3)
        stw     r5,4(r3)

#elif defined(CONFIG_DYNAMIC_MEMSTART)
        /*
         * Mapping based, page aligned dynamic kernel loading.
         *
         * r25 will contain RPN/ERPN for the start address of memory
         *
         * Add the difference between KERNELBASE and PAGE_OFFSET to the
         * start of physical memory to get kernstart_addr.
         */
        lis     r3,kernstart_addr@ha
        la      r3,kernstart_addr@l(r3)

        lis     r4,KERNELBASE@h
        ori     r4,r4,KERNELBASE@l
        lis     r5,PAGE_OFFSET@h
        ori     r5,r5,PAGE_OFFSET@l
        subf    r4,r5,r4

        rlwinm  r6,r25,0,28,31  /* ERPN */
        rlwinm  r7,r25,0,0,3    /* RPN - assuming 256 MB page size */
        add     r7,r7,r4

        stw     r6,0(r3)
        stw     r7,4(r3)
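        /*
         * In other words (sketch, not assembled):
         *
         *      kernstart_addr = memstart_phys + (KERNELBASE - PAGE_OFFSET);
         *
         * where memstart_phys is the 36-bit start of RAM rebuilt from the
         * ERPN/RPN pair in r25.
         */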
#endif

        /*
         * Decide what sort of machine this is and initialize the MMU.
         */
#ifdef CONFIG_KASAN
        bl      kasan_early_init
#endif
        li      r3,0
        mr      r4,r31
        bl      machine_init
        bl      MMU_init

        /* Setup PTE pointers for the Abatron bdiGDB */
        lis     r6, swapper_pg_dir@h
        ori     r6, r6, swapper_pg_dir@l
        lis     r5, abatron_pteptrs@h
        ori     r5, r5, abatron_pteptrs@l
        lis     r4, KERNELBASE@h
        ori     r4, r4, KERNELBASE@l
        stw     r5, 0(r4)       /* Save abatron_pteptrs at a fixed location */
        stw     r6, 0(r5)
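        /* (The "fixed location" is the word reserved by the nop at the very
         * start of _stext above, i.e. the first word at KERNELBASE, so the
         * Abatron BDI debugger can locate abatron_pteptrs.)
         */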

        /* Clear the Machine Check Syndrome Register */
        li      r0,0
        mtspr   SPRN_MCSR,r0

        /* Let's move on */
        lis     r4,start_kernel@h
        ori     r4,r4,start_kernel@l
        lis     r3,MSR_KERNEL@h
        ori     r3,r3,MSR_KERNEL@l
        mtspr   SPRN_SRR0,r4
        mtspr   SPRN_SRR1,r3
        rfi                     /* change context and jump to start_kernel */

/*
 * Interrupt vector entry code
 *
 * The Book E MMUs are always on, so we don't need to handle
 * interrupts in real mode as with previous PPC processors. In
 * this case we handle interrupts in the kernel virtual address
 * space.
 *
 * Interrupt vectors are dynamically placed relative to the
 * interrupt prefix as determined by the address of interrupt_base.
 * The interrupt vector offsets are programmed using the labels
 * for each interrupt vector entry.
 *
 * Interrupt vectors must be aligned on a 16 byte boundary.
 * We align on a 32 byte cache line boundary for good measure.
 */
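/*
 * Informal sketch of the Book E dispatch assumed here: the vector base
 * lives in IVPR and each IVORn supplies the offset of its handler, so a
 * type-n interrupt branches to roughly
 *
 *      (IVPR & 0xffff0000) | (IVORn & 0x0000fff0)
 *
 * The actual IVPR/IVORn programming from the labels below is done later
 * in this file as part of CPU state initialization.
 */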

interrupt_base:
        /* Critical Input Interrupt */
        CRITICAL_EXCEPTION(0x0100, CRITICAL, CriticalInput, unknown_exception)

        /* Machine Check Interrupt */
        CRITICAL_EXCEPTION(0x0200, MACHINE_CHECK, MachineCheck, \
                           machine_check_exception)
        MCHECK_EXCEPTION(0x0210, MachineCheckA, machine_check_exception)

        /* Data Storage Interrupt */
        DATA_STORAGE_EXCEPTION

        /* Instruction Storage Interrupt */
        INSTRUCTION_STORAGE_EXCEPTION

        /* External Input Interrupt */
        EXCEPTION(0x0500, BOOKE_INTERRUPT_EXTERNAL, ExternalInput, \
                  do_IRQ, EXC_XFER_LITE)

        /* Alignment Interrupt */
        ALIGNMENT_EXCEPTION

        /* Program Interrupt */
        PROGRAM_EXCEPTION

        /* Floating Point Unavailable Interrupt */
#ifdef CONFIG_PPC_FPU
        FP_UNAVAILABLE_EXCEPTION
#else
        EXCEPTION(0x2010, BOOKE_INTERRUPT_FP_UNAVAIL, \
                  FloatingPointUnavailable, unknown_exception, EXC_XFER_STD)
#endif
        /* System Call Interrupt */
        START_EXCEPTION(SystemCall)
        SYSCALL_ENTRY   0xc00 BOOKE_INTERRUPT_SYSCALL

        /* Auxiliary Processor Unavailable Interrupt */
        EXCEPTION(0x2020, BOOKE_INTERRUPT_AP_UNAVAIL, \
                  AuxillaryProcessorUnavailable, unknown_exception, EXC_XFER_STD)

        /* Decrementer Interrupt */
        DECREMENTER_EXCEPTION

        /* Fixed Interval Timer Interrupt */
        /* TODO: Add FIT support */
        EXCEPTION(0x1010, BOOKE_INTERRUPT_FIT, FixedIntervalTimer, \
                  unknown_exception, EXC_XFER_STD)

        /* Watchdog Timer Interrupt */
        /* TODO: Add watchdog support */
#ifdef CONFIG_BOOKE_WDT
        CRITICAL_EXCEPTION(0x1020, WATCHDOG, WatchdogTimer, WatchdogException)
#else
        CRITICAL_EXCEPTION(0x1020, WATCHDOG, WatchdogTimer, unknown_exception)
#endif

        /* Data TLB Error Interrupt */
        START_EXCEPTION(DataTLBError44x)
        mtspr   SPRN_SPRG_WSCRATCH0, r10        /* Save some working registers */
        mtspr   SPRN_SPRG_WSCRATCH1, r11
        mtspr   SPRN_SPRG_WSCRATCH2, r12
        mtspr   SPRN_SPRG_WSCRATCH3, r13
        mfcr    r11
        mtspr   SPRN_SPRG_WSCRATCH4, r11
        mfspr   r10, SPRN_DEAR          /* Get faulting address */

        /* If we are faulting a kernel address, we have to use the
         * kernel page tables.
         */
        lis     r11, PAGE_OFFSET@h
        cmplw   r10, r11
        blt+    3f
        lis     r11, swapper_pg_dir@h
        ori     r11, r11, swapper_pg_dir@l

        mfspr   r12,SPRN_MMUCR
        rlwinm  r12,r12,0,0,23          /* Clear TID */

        b       4f

        /* Get the PGD for the current thread */
3:
        mfspr   r11,SPRN_SPRG_THREAD
        lwz     r11,PGDIR(r11)

        /* Load PID into MMUCR TID */
        mfspr   r12,SPRN_MMUCR
        mfspr   r13,SPRN_PID            /* Get PID */
        rlwimi  r12,r13,0,24,31         /* Set TID */

4:
        mtspr   SPRN_MMUCR,r12

        /* Mask of required permission bits. Note that while we
         * do copy ESR:ST to _PAGE_RW position as trying to write
         * to an RO page is pretty common, we don't do it with
         * _PAGE_DIRTY. We could do it, but it's a fairly rare
         * event so I'd rather take the overhead when it happens
         * rather than adding an instruction here. We should measure
         * whether the whole thing is worth it in the first place
         * as we could avoid loading SPRN_ESR completely in the first
         * place...
         *
         * TODO: Is it worth doing that mfspr & rlwimi in the first
         * place or can we save a couple of instructions here?
         */
        mfspr   r12,SPRN_ESR
        li      r13,_PAGE_PRESENT|_PAGE_ACCESSED
        rlwimi  r13,r12,10,30,30
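        /* Illustrative note, assuming the usual 44x definitions where
         * ESR[ST] is 0x00800000 and _PAGE_RW is 0x00000002: the rlwimi
         * above rotates ESR left by 10 and inserts only bit 30, i.e.
         *
         *      r13 = _PAGE_PRESENT | _PAGE_ACCESSED
         *            | ((esr & ESR_ST) ? _PAGE_RW : 0);
         *
         * so store faults additionally require write permission.
         */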

        /* Load the PTE */
        /* Compute pgdir/pmd offset */
        rlwinm  r12, r10, PPC44x_PGD_OFF_SHIFT, PPC44x_PGD_OFF_MASK_BIT, 29
        lwzx    r11, r12, r11           /* Get pgd/pmd entry */
        rlwinm. r12, r11, 0, 0, 20      /* Extract pt base address */
        beq     2f                      /* Bail if no table */

        /* Compute pte address */
        rlwimi  r12, r10, PPC44x_PTE_ADD_SHIFT, PPC44x_PTE_ADD_MASK_BIT, 28
        lwz     r11, 0(r12)             /* Get high word of pte entry */
        lwz     r12, 4(r12)             /* Get low word of pte entry */
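        /* Rough C equivalent of the two-level walk above (sketch only; the
         * real shifts and masks come from the PPC44x_PGD_OFF_* and
         * PPC44x_PTE_ADD_* macros, and each PTE is a 64-bit entry):
         *
         *      pmd = pgdir[ea >> PGDIR_SHIFT];
         *      if ((pmd & 0xfffff800) == 0)
         *              goto bail;              // no page table
         *      pte = *(u64 *)((pmd & 0xfffff800) +
         *                     ((ea >> PAGE_SHIFT) & (PTRS_PER_PTE - 1)) * 8);
         */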

        lis     r10,tlb_44x_index@ha

        andc.   r13,r13,r12             /* Check permission */

        /* Load the next available TLB index */
        lwz     r13,tlb_44x_index@l(r10)

        bne     2f                      /* Bail if permission mismatch */

        /* Increment, rollover, and store TLB index */
        addi    r13,r13,1

        patch_site 0f, patch__tlb_44x_hwater_D
        /* Compare with watermark (instruction gets patched) */
0:      cmpwi   0,r13,1                 /* reserve entries */
        ble     5f
        li      r13,0
5:
        /* Store the next available TLB index */
        stw     r13,tlb_44x_index@l(r10)
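        /* i.e. (sketch) the replacement index round-robins over the
         * non-pinned TLB entries, with the upper bound patched in at boot:
         *
         *      if (++tlb_44x_index > tlb_44x_hwater)
         *              tlb_44x_index = 0;
         */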

        /* Re-load the faulting address */
        mfspr   r10,SPRN_DEAR

        /* Jump to common tlb load */
        b       finish_tlb_load_44x

2:
        /* The bailout. Restore registers to pre-exception conditions
         * and call the heavyweights to help us out.
         */
        mfspr   r11, SPRN_SPRG_RSCRATCH4
        mtcr    r11
        mfspr   r13, SPRN_SPRG_RSCRATCH3
        mfspr   r12, SPRN_SPRG_RSCRATCH2
        mfspr   r11, SPRN_SPRG_RSCRATCH1
        mfspr   r10, SPRN_SPRG_RSCRATCH0
        b       DataStorage

        /* Instruction TLB Error Interrupt */
        /*
         * Nearly the same as above, except we get our
         * information from different registers and bailout
         * to a different point.
         */
        START_EXCEPTION(InstructionTLBError44x)
        mtspr   SPRN_SPRG_WSCRATCH0, r10        /* Save some working registers */
        mtspr   SPRN_SPRG_WSCRATCH1, r11
        mtspr   SPRN_SPRG_WSCRATCH2, r12
        mtspr   SPRN_SPRG_WSCRATCH3, r13
        mfcr    r11
        mtspr   SPRN_SPRG_WSCRATCH4, r11
        mfspr   r10, SPRN_SRR0          /* Get faulting address */

        /* If we are faulting a kernel address, we have to use the
         * kernel page tables.
         */
        lis     r11, PAGE_OFFSET@h
        cmplw   r10, r11
        blt+    3f
        lis     r11, swapper_pg_dir@h
        ori     r11, r11, swapper_pg_dir@l

        mfspr   r12,SPRN_MMUCR
        rlwinm  r12,r12,0,0,23          /* Clear TID */

        b       4f

        /* Get the PGD for the current thread */
3:
        mfspr   r11,SPRN_SPRG_THREAD
        lwz     r11,PGDIR(r11)

        /* Load PID into MMUCR TID */
        mfspr   r12,SPRN_MMUCR
        mfspr   r13,SPRN_PID            /* Get PID */
        rlwimi  r12,r13,0,24,31         /* Set TID */

4:
        mtspr   SPRN_MMUCR,r12

        /* Make up the required permissions */
        li      r13,_PAGE_PRESENT | _PAGE_ACCESSED | _PAGE_EXEC

        /* Compute pgdir/pmd offset */
        rlwinm  r12, r10, PPC44x_PGD_OFF_SHIFT, PPC44x_PGD_OFF_MASK_BIT, 29
        lwzx    r11, r12, r11           /* Get pgd/pmd entry */
        rlwinm. r12, r11, 0, 0, 20      /* Extract pt base address */
        beq     2f                      /* Bail if no table */

        /* Compute pte address */
        rlwimi  r12, r10, PPC44x_PTE_ADD_SHIFT, PPC44x_PTE_ADD_MASK_BIT, 28
        lwz     r11, 0(r12)             /* Get high word of pte entry */
        lwz     r12, 4(r12)             /* Get low word of pte entry */

        lis     r10,tlb_44x_index@ha

        andc.   r13,r13,r12             /* Check permission */

        /* Load the next available TLB index */
        lwz     r13,tlb_44x_index@l(r10)

        bne     2f                      /* Bail if permission mismatch */

        /* Increment, rollover, and store TLB index */
        addi    r13,r13,1

        patch_site 0f, patch__tlb_44x_hwater_I
        /* Compare with watermark (instruction gets patched) */
0:      cmpwi   0,r13,1                 /* reserve entries */
        ble     5f
        li      r13,0
5:
        /* Store the next available TLB index */
        stw     r13,tlb_44x_index@l(r10)

        /* Re-load the faulting address */
        mfspr   r10,SPRN_SRR0

        /* Jump to common TLB load point */
        b       finish_tlb_load_44x

2:
        /* The bailout. Restore registers to pre-exception conditions
         * and call the heavyweights to help us out.
         */
        mfspr   r11, SPRN_SPRG_RSCRATCH4
        mtcr    r11
        mfspr   r13, SPRN_SPRG_RSCRATCH3
        mfspr   r12, SPRN_SPRG_RSCRATCH2
        mfspr   r11, SPRN_SPRG_RSCRATCH1
        mfspr   r10, SPRN_SPRG_RSCRATCH0
        b       InstructionStorage

/*
 * Both the instruction and data TLB miss get to this
 * point to load the TLB.
 *      r10 - EA of fault
 *      r11 - PTE high word value
 *      r12 - PTE low word value
 *      r13 - TLB index
 *      MMUCR - loaded with proper value when we get here
 *      Upon exit, we reload everything and RFI.
 */
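/*
 * For reference (informal description of the 44x tlbwe interface assumed
 * here): each TLB entry is written as three words selected by the last
 * tlbwe operand -- PAGEID (EPN | V | TS | SIZE, with the TID taken from
 * MMUCR), XLAT (RPN | ERPN) and ATTRIB (storage attributes plus the
 * permission bits). The three tlbwe below fill in XLAT, PAGEID and ATTRIB
 * respectively.
 */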
finish_tlb_load_44x:
        /* Combine RPN & ERPN and write the translation (XLAT) word */
        rlwimi  r11,r12,0,0,31-PAGE_SHIFT
        tlbwe   r11,r13,PPC44x_TLB_XLAT

        /*
         * Create the PAGEID word. This is the faulting address (EPN),
         * page size, and valid flag.
         */
        li      r11,PPC44x_TLB_VALID | PPC44x_TLBE_SIZE
        /* Insert valid and page size */
        rlwimi  r10,r11,0,PPC44x_PTE_ADD_MASK_BIT,31
        tlbwe   r10,r13,PPC44x_TLB_PAGEID       /* Write PAGEID */

        /* And the attribute word (WS 2) */
        li      r10,0xf85                       /* Mask to apply from PTE */
        rlwimi  r10,r12,29,30,30                /* DIRTY -> SW position */
        and     r11,r12,r10                     /* Mask PTE bits to keep */
        andi.   r10,r12,_PAGE_USER              /* User page? */
        beq     1f                              /* nope, leave U bits empty */
        rlwimi  r11,r11,3,26,28                 /* yes, copy S bits to U */
1:      tlbwe   r11,r13,PPC44x_TLB_ATTRIB       /* Write ATTRIB */
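        /* Note on the 0xf85 mask above (informal, assuming the usual 44x PTE
         * layout that mirrors the TLB attribute word): it keeps the storage
         * attribute bits (W, I, M, G, E) plus supervisor read/execute, while
         * write permission (SW) is only granted when _PAGE_DIRTY is set, so
         * a clean writable page takes one more DSI and the fault handler can
         * then mark it dirty.
         */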

        /* Done...restore registers and get out of here.
        */
        mfspr   r11, SPRN_SPRG_RSCRATCH4
        mtcr    r11
        mfspr   r13, SPRN_SPRG_RSCRATCH3
        mfspr   r12, SPRN_SPRG_RSCRATCH2
        mfspr   r11, SPRN_SPRG_RSCRATCH1
        mfspr   r10, SPRN_SPRG_RSCRATCH0
        rfi                                     /* Force context change */

/* TLB error interrupts for 476
 */
#ifdef CONFIG_PPC_47x
        START_EXCEPTION(DataTLBError47x)
        mtspr   SPRN_SPRG_WSCRATCH0,r10 /* Save some working registers */
        mtspr   SPRN_SPRG_WSCRATCH1,r11
        mtspr   SPRN_SPRG_WSCRATCH2,r12
        mtspr   SPRN_SPRG_WSCRATCH3,r13
        mfcr    r11
        mtspr   SPRN_SPRG_WSCRATCH4,r11
        mfspr   r10,SPRN_DEAR           /* Get faulting address */

        /* If we are faulting a kernel address, we have to use the
         * kernel page tables.
         */
        lis     r11,PAGE_OFFSET@h
        cmplw   cr0,r10,r11
        blt+    3f
        lis     r11,swapper_pg_dir@h
        ori     r11,r11, swapper_pg_dir@l
        li      r12,0                   /* MMUCR = 0 */
        b       4f

        /* Get the PGD for the current thread and set up MMUCR */
3:      mfspr   r11,SPRN_SPRG3
        lwz     r11,PGDIR(r11)
        mfspr   r12,SPRN_PID            /* Get PID */
4:      mtspr   SPRN_MMUCR,r12          /* Set MMUCR */

        /* Mask of required permission bits. Note that while we
         * do copy ESR:ST to _PAGE_RW position as trying to write
         * to an RO page is pretty common, we don't do it with
         * _PAGE_DIRTY. We could do it, but it's a fairly rare
         * event so I'd rather take the overhead when it happens
         * rather than adding an instruction here. We should measure
         * whether the whole thing is worth it in the first place
         * as we could avoid loading SPRN_ESR completely in the first
         * place...
         *
         * TODO: Is it worth doing that mfspr & rlwimi in the first
         * place or can we save a couple of instructions here?
         */
        mfspr   r12,SPRN_ESR
        li      r13,_PAGE_PRESENT|_PAGE_ACCESSED
        rlwimi  r13,r12,10,30,30

        /* Load the PTE */
        /* Compute pgdir/pmd offset */
        rlwinm  r12,r10,PPC44x_PGD_OFF_SHIFT,PPC44x_PGD_OFF_MASK_BIT,29
        lwzx    r11,r12,r11             /* Get pgd/pmd entry */

        /* Word 0 is EPN,V,TS,DSIZ */
        li      r12,PPC47x_TLB0_VALID | PPC47x_TLBE_SIZE
        rlwimi  r10,r12,0,32-PAGE_SHIFT,31      /* Insert valid and page size */
        li      r12,0
        tlbwe   r10,r12,0

        /* XXX can we do better? Need to make sure tlbwe has established the
         * latched V bit in MMUCR0 before the PTE is loaded further down */
#ifdef CONFIG_SMP
        isync
#endif

        rlwinm. r12,r11,0,0,20          /* Extract pt base address */
        /* Compute pte address */
        rlwimi  r12,r10,PPC44x_PTE_ADD_SHIFT,PPC44x_PTE_ADD_MASK_BIT,28
        beq     2f                      /* Bail if no table */
        lwz     r11,0(r12)              /* Get high word of pte entry */

        /* XXX can we do better? maybe insert a known 0 bit from r11 into the
         * bottom of r12 to create a data dependency... We can also use r10
         * as destination nowadays
         */
#ifdef CONFIG_SMP
        lwsync
#endif
        lwz     r12,4(r12)              /* Get low word of pte entry */

        andc.   r13,r13,r12             /* Check permission */

        /* Jump to common tlb load */
        beq     finish_tlb_load_47x

2:      /* The bailout. Restore registers to pre-exception conditions
         * and call the heavyweights to help us out.
         */
        mfspr   r11,SPRN_SPRG_RSCRATCH4
        mtcr    r11
        mfspr   r13,SPRN_SPRG_RSCRATCH3
        mfspr   r12,SPRN_SPRG_RSCRATCH2
        mfspr   r11,SPRN_SPRG_RSCRATCH1
        mfspr   r10,SPRN_SPRG_RSCRATCH0
        b       DataStorage

        /* Instruction TLB Error Interrupt */
        /*
         * Nearly the same as above, except we get our
         * information from different registers and bailout
         * to a different point.
         */
        START_EXCEPTION(InstructionTLBError47x)
        mtspr   SPRN_SPRG_WSCRATCH0,r10 /* Save some working registers */
        mtspr   SPRN_SPRG_WSCRATCH1,r11
        mtspr   SPRN_SPRG_WSCRATCH2,r12
        mtspr   SPRN_SPRG_WSCRATCH3,r13
        mfcr    r11
        mtspr   SPRN_SPRG_WSCRATCH4,r11
        mfspr   r10,SPRN_SRR0           /* Get faulting address */

        /* If we are faulting a kernel address, we have to use the
         * kernel page tables.
         */
        lis     r11,PAGE_OFFSET@h
        cmplw   cr0,r10,r11
        blt+    3f
        lis     r11,swapper_pg_dir@h
        ori     r11,r11, swapper_pg_dir@l
        li      r12,0                   /* MMUCR = 0 */
        b       4f

        /* Get the PGD for the current thread and set up MMUCR */
3:      mfspr   r11,SPRN_SPRG_THREAD
        lwz     r11,PGDIR(r11)
        mfspr   r12,SPRN_PID            /* Get PID */
4:      mtspr   SPRN_MMUCR,r12          /* Set MMUCR */

        /* Make up the required permissions */
        li      r13,_PAGE_PRESENT | _PAGE_ACCESSED | _PAGE_EXEC

        /* Load PTE */
        /* Compute pgdir/pmd offset */
        rlwinm  r12,r10,PPC44x_PGD_OFF_SHIFT,PPC44x_PGD_OFF_MASK_BIT,29
        lwzx    r11,r12,r11             /* Get pgd/pmd entry */

        /* Word 0 is EPN,V,TS,DSIZ */
        li      r12,PPC47x_TLB0_VALID | PPC47x_TLBE_SIZE
        rlwimi  r10,r12,0,32-PAGE_SHIFT,31      /* Insert valid and page size */
        li      r12,0
        tlbwe   r10,r12,0

        /* XXX can we do better? Need to make sure tlbwe has established the
         * latched V bit in MMUCR0 before the PTE is loaded further down */
#ifdef CONFIG_SMP
        isync
#endif

        rlwinm. r12,r11,0,0,20          /* Extract pt base address */
        /* Compute pte address */
        rlwimi  r12,r10,PPC44x_PTE_ADD_SHIFT,PPC44x_PTE_ADD_MASK_BIT,28
        beq     2f                      /* Bail if no table */

        lwz     r11,0(r12)              /* Get high word of pte entry */
        /* XXX can we do better? maybe insert a known 0 bit from r11 into the
         * bottom of r12 to create a data dependency... We can also use r10
         * as destination nowadays
         */
#ifdef CONFIG_SMP
        lwsync
#endif
        lwz     r12,4(r12)              /* Get low word of pte entry */

        andc.   r13,r13,r12             /* Check permission */

        /* Jump to common TLB load point */
        beq     finish_tlb_load_47x

2:      /* The bailout. Restore registers to pre-exception conditions
         * and call the heavyweights to help us out.
         */
        mfspr   r11, SPRN_SPRG_RSCRATCH4
        mtcr    r11
        mfspr   r13, SPRN_SPRG_RSCRATCH3
        mfspr   r12, SPRN_SPRG_RSCRATCH2
        mfspr   r11, SPRN_SPRG_RSCRATCH1
        mfspr   r10, SPRN_SPRG_RSCRATCH0
        b       InstructionStorage

/*
 * Both the instruction and data TLB miss get to this
 * point to load the TLB.
 *      r10 - free to use
 *      r11 - PTE high word value
 *      r12 - PTE low word value
 *      r13 - free to use
 *      MMUCR - loaded with proper value when we get here
 *      Upon exit, we reload everything and RFI.
 */
finish_tlb_load_47x:
        /* Combine RPN & ERPN and write WS 1 */
        rlwimi  r11,r12,0,0,31-PAGE_SHIFT
        tlbwe   r11,r13,1

        /* And make up word 2 */
        li      r10,0xf85                       /* Mask to apply from PTE */
        rlwimi  r10,r12,29,30,30                /* DIRTY -> SW position */
        and     r11,r12,r10                     /* Mask PTE bits to keep */
        andi.   r10,r12,_PAGE_USER              /* User page? */
        beq     1f                              /* nope, leave U bits empty */
        rlwimi  r11,r11,3,26,28                 /* yes, copy S bits to U */
1:      tlbwe   r11,r13,2

        /* Done...restore registers and get out of here.
        */
        mfspr   r11, SPRN_SPRG_RSCRATCH4
        mtcr    r11
        mfspr   r13, SPRN_SPRG_RSCRATCH3
        mfspr   r12, SPRN_SPRG_RSCRATCH2
        mfspr   r11, SPRN_SPRG_RSCRATCH1
        mfspr   r10, SPRN_SPRG_RSCRATCH0
        rfi

#endif /* CONFIG_PPC_47x */

        /* Debug Interrupt */
        /*
         * This statement needs to exist at the end of the IVPR
         * definition just in case you end up taking a debug
         * exception within another exception.
         */
        DEBUG_CRIT_EXCEPTION

interrupt_end:

/*
 * Global functions
 */

/*
 * Adjust the machine check IVOR on 440A cores
 */
_GLOBAL(__fixup_440A_mcheck)
        li      r3,MachineCheckA@l
        mtspr   SPRN_IVOR1,r3
        sync
        blr

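/*
 * set_context(next_ctx, next_pgdir) -- roughly, in C (sketch only; the
 * parameter names are illustrative):
 *
 *      if (IS_ENABLED(CONFIG_BDI_SWITCH))
 *              abatron_pteptrs[1] = next_pgdir;
 *      mtspr(SPRN_PID, next_ctx);
 *      isync();
 */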
_GLOBAL(set_context)

#ifdef CONFIG_BDI_SWITCH
        /* Context switch the PTE pointer for the Abatron BDI2000.
         * The PGDIR is the second parameter.
         */
        lis     r5, abatron_pteptrs@h
        ori     r5, r5, abatron_pteptrs@l
        stw     r4, 0x4(r5)
#endif
        mtspr   SPRN_PID,r3
        isync                   /* Force context change */
        blr
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 798)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 799) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 800) * Init CPU state. This is called at boot time or for secondary CPUs
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 801) * to set up the initial TLB entries, IVORs, etc.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 802) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 803) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 804) _GLOBAL(init_cpu_state)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 805) mflr r22
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 806) #ifdef CONFIG_PPC_47x
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 807) /* We use the PVR to differentiate 44x cores from 476 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 808) mfspr r3,SPRN_PVR
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 809) srwi r3,r3,16
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 810) cmplwi cr0,r3,PVR_476FPE@h
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 811) beq head_start_47x
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 812) cmplwi cr0,r3,PVR_476@h
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 813) beq head_start_47x
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 814) cmplwi cr0,r3,PVR_476_ISS@h
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 815) beq head_start_47x
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 816) #endif /* CONFIG_PPC_47x */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 817)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 818) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 819) * In case the firmware didn't do it, we apply some workarounds
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 820) * that are good for all 440 core variants here
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 821) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 822) mfspr r3,SPRN_CCR0
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 823) rlwinm r3,r3,0,0,27 /* disable icache prefetch */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 824) isync
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 825) mtspr SPRN_CCR0,r3
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 826) isync
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 827) sync
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 828)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 829) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 830) * Set up the initial MMU state for 44x
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 831) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 832) * We are still executing code at the virtual address
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 833) * mappings set by the firmware for the base of RAM.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 834) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 835) * We first invalidate all TLB entries but the one
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 836) * we are running from. We then load the KERNELBASE
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 837) * mappings so we can begin to use kernel addresses
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 838) * natively and so the interrupt vector locations are
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 839) * permanently pinned (necessary since Book E
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 840) * implementations always have translation enabled).
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 841) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 842) * TODO: Use the known TLB entry we are running from to
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 843) * determine which physical region we are located
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 844) * in. This can be used to determine where in RAM
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 845) * (on a shared CPU system) or PCI memory space
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 846) * (on a DRAMless system) we are located.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 847) * For now, we assume a perfect world which means
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 848) * we are located at the base of DRAM (physical 0).
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 849) */
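/*
 * For reference, a 44x software-managed TLB entry is written as three
 * words via tlbwe, as done below: word 0 (PPC44x_TLB_PAGEID) holds the
 * effective page number plus the valid, translation-space and size
 * bits; word 1 (PPC44x_TLB_XLAT) holds the real page number (and ERPN
 * for >32-bit physical addresses); word 2 (PPC44x_TLB_ATTRIB) holds
 * the storage attributes (W/I/M/G/E) and the permission bits. This is
 * a summary inferred from the constants used here, not a substitute
 * for the 440 user manual.
 */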
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 850)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 851) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 852) * Search TLB for entry that we are currently using.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 853) * Invalidate all entries but the one we are using.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 854) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 855) /* Load our current PID->MMUCR TID and MSR IS->MMUCR STS */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 856) mfspr r3,SPRN_PID /* Get PID */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 857) mfmsr r4 /* Get MSR */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 858) andi. r4,r4,MSR_IS@l /* TS=1? */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 859) beq wmmucr /* If not, leave STS=0 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 860) oris r3,r3,PPC44x_MMUCR_STS@h /* Set STS=1 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 861) wmmucr: mtspr SPRN_MMUCR,r3 /* Put MMUCR */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 862) sync
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 863)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 864) bl invstr /* Find our address */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 865) invstr: mflr r5 /* Make it accessible */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 866) tlbsx r23,0,r5 /* Find entry we are in */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 867) li r4,0 /* Start at TLB entry 0 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 868) li r3,0 /* Set PAGEID inval value */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 869) 1: cmpw r23,r4 /* Is this our entry? */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 870) beq skpinv /* If so, skip the inval */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 871) tlbwe r3,r4,PPC44x_TLB_PAGEID /* If not, inval the entry */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 872) skpinv: addi r4,r4,1 /* Increment */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 873) cmpwi r4,64 /* Are we done? */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 874) bne 1b /* If not, repeat */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 875) isync /* If so, context change */
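/*
 * At this point every TLB entry except the one found by the tlbsx
 * above (its index is in r23) has had word 0 (PAGEID) zeroed, which
 * clears its valid bit, so only our current mapping remains live.
 */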
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 876)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 877) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 878) * Configure and load pinned entry into TLB slot 63.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 879) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 880) #ifdef CONFIG_NONSTATIC_KERNEL
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 881) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 882) * In case of a NONSTATIC_KERNEL we reuse the TLB XLAT
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 883) * entries of the initial mapping set by the boot loader.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 884) * The XLAT entry is stored in r25.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 885) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 886)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 887) /* Read the XLAT entry for our current mapping */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 888) tlbre r25,r23,PPC44x_TLB_XLAT
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 889)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 890) lis r3,KERNELBASE@h
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 891) ori r3,r3,KERNELBASE@l
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 892)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 893) /* Use our current RPN entry */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 894) mr r4,r25
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 895) #else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 896)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 897) lis r3,PAGE_OFFSET@h
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 898) ori r3,r3,PAGE_OFFSET@l
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 899)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 900) /* Kernel is at the base of RAM */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 901) li r4, 0 /* Load the kernel physical address */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 902) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 903)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 904) /* Load the kernel PID = 0 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 905) li r0,0
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 906) mtspr SPRN_PID,r0
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 907) sync
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 908)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 909) /* Initialize MMUCR */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 910) li r5,0
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 911) mtspr SPRN_MMUCR,r5
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 912) sync
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 913)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 914) /* pageid fields */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 915) clrrwi r3,r3,10 /* Mask off all but the effective page number */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 916) ori r3,r3,PPC44x_TLB_VALID | PPC44x_TLB_256M
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 917)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 918) /* xlat fields */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 919) clrrwi r4,r4,10 /* Mask off all but the real page number */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 920) /* ERPN is 0 for the first 4GB of physical address space */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 921)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 922) /* attrib fields */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 923) /* Added guarded bit to protect against speculative loads/stores */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 924) li r5,0
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 925) ori r5,r5,(PPC44x_TLB_SW | PPC44x_TLB_SR | PPC44x_TLB_SX | PPC44x_TLB_G)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 926)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 927) li r0,63 /* TLB slot 63 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 928)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 929) tlbwe r3,r0,PPC44x_TLB_PAGEID /* Load the pageid fields */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 930) tlbwe r4,r0,PPC44x_TLB_XLAT /* Load the translation fields */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 931) tlbwe r5,r0,PPC44x_TLB_ATTRIB /* Load the attrib/access fields */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 932)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 933) /* Force context change */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 934) mfmsr r0
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 935) mtspr SPRN_SRR1, r0
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 936) lis r0,3f@h
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 937) ori r0,r0,3f@l
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 938) mtspr SPRN_SRR0,r0
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 939) sync
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 940) rfi
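/*
 * The rfi above is what actually switches us onto the pinned mapping:
 * rfi restores the MSR we saved in SRR1, sends us to the linked
 * (virtual) address of label 3 loaded into SRR0, and is
 * context-synchronizing, so execution resumes through TLB slot 63.
 */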
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 941)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 942) /* If necessary, invalidate original entry we used */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 943) 3: cmpwi r23,63
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 944) beq 4f
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 945) li r6,0
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 946) tlbwe r6,r23,PPC44x_TLB_PAGEID
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 947) isync
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 948)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 949) 4:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 950) #ifdef CONFIG_PPC_EARLY_DEBUG_44x
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 951) /* Add UART mapping for early debug. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 952)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 953) /* pageid fields */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 954) lis r3,PPC44x_EARLY_DEBUG_VIRTADDR@h
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 955) ori r3,r3,PPC44x_TLB_VALID|PPC44x_TLB_TS|PPC44x_TLB_64K
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 956)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 957) /* xlat fields */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 958) lis r4,CONFIG_PPC_EARLY_DEBUG_44x_PHYSLOW@h
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 959) ori r4,r4,CONFIG_PPC_EARLY_DEBUG_44x_PHYSHIGH
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 960)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 961) /* attrib fields */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 962) li r5,(PPC44x_TLB_SW|PPC44x_TLB_SR|PPC44x_TLB_I|PPC44x_TLB_G)
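/*
 * I (cache-inhibited) and G (guarded) are what a memory-mapped UART
 * needs: no caching and no speculative accesses to device registers.
 */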
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 963) li r0,62 /* TLB slot 62 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 964)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 965) tlbwe r3,r0,PPC44x_TLB_PAGEID
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 966) tlbwe r4,r0,PPC44x_TLB_XLAT
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 967) tlbwe r5,r0,PPC44x_TLB_ATTRIB
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 968)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 969) /* Force context change */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 970) isync
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 971) #endif /* CONFIG_PPC_EARLY_DEBUG_44x */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 972)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 973) /* Establish the interrupt vector offsets */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 974) SET_IVOR(0, CriticalInput);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 975) SET_IVOR(1, MachineCheck);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 976) SET_IVOR(2, DataStorage);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 977) SET_IVOR(3, InstructionStorage);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 978) SET_IVOR(4, ExternalInput);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 979) SET_IVOR(5, Alignment);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 980) SET_IVOR(6, Program);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 981) SET_IVOR(7, FloatingPointUnavailable);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 982) SET_IVOR(8, SystemCall);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 983) SET_IVOR(9, AuxillaryProcessorUnavailable);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 984) SET_IVOR(10, Decrementer);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 985) SET_IVOR(11, FixedIntervalTimer);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 986) SET_IVOR(12, WatchdogTimer);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 987) SET_IVOR(13, DataTLBError44x);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 988) SET_IVOR(14, InstructionTLBError44x);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 989) SET_IVOR(15, DebugCrit);
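/*
 * Each SET_IVOR above presumably loads the offset (low 16 bits) of the
 * named handler into the corresponding IVORn SPR (see head_booke.h);
 * the hardware combines that offset with the IVPR base programmed in
 * head_start_common below, which is why the handlers must live in the
 * same 64KB region as interrupt_base.
 */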
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 990)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 991) b head_start_common
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 992)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 993)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 994) #ifdef CONFIG_PPC_47x
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 995)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 996) #ifdef CONFIG_SMP
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 997)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 998) /* Entry point for secondary 47x processors */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 999) _GLOBAL(start_secondary_47x)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1000) mr r24,r3 /* CPU number */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1001)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1002) bl init_cpu_state
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1003)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1004) /* Now we need to bolt the rest of kernel memory, which
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1005) * is done in C code. We must be careful because our task
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1006) * struct or our stack can (and probably will) be out
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1007) * of reach of the initial 256M TLB entry, so we use a
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1008) * small temporary stack in .bss for that. This works
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1009) * because only one CPU at a time can be in this code.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1010) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1011) lis r1,temp_boot_stack@h
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1012) ori r1,r1,temp_boot_stack@l
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1013) addi r1,r1,1024-STACK_FRAME_OVERHEAD
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1014) li r0,0
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1015) stw r0,0(r1)
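/* A zero back-chain word marks this as the outermost frame, so stack walks stop here */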
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1016) bl mmu_init_secondary
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1017)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1018) /* Now we can get our task struct and real stack pointer */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1019)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1020) /* Get current's stack and current */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1021) lis r2,secondary_current@ha
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1022) lwz r2,secondary_current@l(r2)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1023) lwz r1,TASK_STACK(r2)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1024)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1025) /* Current stack pointer */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1026) addi r1,r1,THREAD_SIZE-STACK_FRAME_OVERHEAD
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1027) li r0,0
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1028) stw r0,0(r1)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1029)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1030) /* Kernel stack for exception entry in SPRG3 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1031) addi r4,r2,THREAD /* current (secondary) task's THREAD */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1032) mtspr SPRN_SPRG3,r4
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1033)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1034) b start_secondary
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1035)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1036) #endif /* CONFIG_SMP */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1037)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1038) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1039) * Set up the initial MMU state for 47x
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1040) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1041) * We are still executing code at the virtual address
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1042) * mappings set by the firmware for the base of RAM.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1043) */
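/*
 * Unlike the 44x, whose 64-entry TLB is written one attribute group at
 * a time (PAGEID/XLAT/ATTRIB), the 47x UTLB below is written as plain
 * words 0/1/2 and is set-associative (multiple ways per congruence
 * class) with bolted entries; the RA operand of tlbwe selects the
 * way/bolted slot (see the 0x8800-style constants further down). That
 * encoding is inferred from this code rather than quoted from the 476
 * manual.
 */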
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1044)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1045) head_start_47x:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1046) /* Load our current PID->MMUCR TID and MSR IS->MMUCR STS */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1047) mfspr r3,SPRN_PID /* Get PID */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1048) mfmsr r4 /* Get MSR */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1049) andi. r4,r4,MSR_IS@l /* TS=1? */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1050) beq 1f /* If not, leave STS=0 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1051) oris r3,r3,PPC47x_MMUCR_STS@h /* Set STS=1 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1052) 1: mtspr SPRN_MMUCR,r3 /* Put MMUCR */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1053) sync
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1054)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1055) /* Find the entry we are running from */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1056) bl 1f
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1057) 1: mflr r23
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1058) tlbsx r23,0,r23
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1059) tlbre r24,r23,0
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1060) tlbre r25,r23,1
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1061) tlbre r26,r23,2
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1062)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1063) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1064) * Cleanup time
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1065) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1066)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1067) /* Initialize MMUCR */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1068) li r5,0
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1069) mtspr SPRN_MMUCR,r5
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1070) sync
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1071)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1072) clear_all_utlb_entries:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1073)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1074) #; Set initial values.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1075)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1076) addis r3,0,0x8000
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1077) addi r4,0,0
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1078) addi r5,0,0
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1079) b clear_utlb_entry
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1080)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1081) #; Align the loop to speed things up.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1082)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1083) .align 6
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1084)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1085) clear_utlb_entry:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1086)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1087) tlbwe r4,r3,0
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1088) tlbwe r5,r3,1
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1089) tlbwe r5,r3,2
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1090) addis r3,r3,0x2000
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1091) cmpwi r3,0
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1092) bne clear_utlb_entry
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1093) addis r3,0,0x8000
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1094) addis r4,r4,0x100
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1095) cmpwi r4,0
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1096) bne clear_utlb_entry
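#; Rough shape of the loop above (inferred from the constants used):
#; the inner addis r3,r3,0x2000 steps the way-select bits until r3
#; wraps to zero, while the outer addis r4,r4,0x100 steps the EPN in
#; word 0 through every congruence class until r4 wraps; each entry is
#; written with V=0 in word 0 and zeroed words 1 and 2, invalidating
#; the whole UTLB.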
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1097)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1098) #; Restore original entry.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1099)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1100) oris r23,r23,0x8000 /* specify the way */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1101) tlbwe r24,r23,0
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1102) tlbwe r25,r23,1
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1103) tlbwe r26,r23,2
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1104)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1105) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1106) * Configure and load pinned entry into TLB for the kernel core
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1107) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1108)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1109) lis r3,PAGE_OFFSET@h
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1110) ori r3,r3,PAGE_OFFSET@l
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1111)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1112) /* Load the kernel PID = 0 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1113) li r0,0
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1114) mtspr SPRN_PID,r0
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1115) sync
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1116)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1117) /* Word 0 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1118) clrrwi r3,r3,12 /* Mask off all but the effective page number */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1119) ori r3,r3,PPC47x_TLB0_VALID | PPC47x_TLB0_256M
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1120)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1121) /* Word 1 - use r25. RPN is the same as the original entry */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1122)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1123) /* Word 2 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1124) li r5,0
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1125) ori r5,r5,PPC47x_TLB2_S_RWX
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1126) #ifdef CONFIG_SMP
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1127) ori r5,r5,PPC47x_TLB2_M
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1128) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1129)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1130) /* We write to way 0, bolted slot 0 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1131) lis r0,0x8800
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1132) tlbwe r3,r0,0
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1133) tlbwe r25,r0,1
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1134) tlbwe r5,r0,2
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1135)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1136) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1137) * Configure SSPCR, ISPCR and USPCR to search everything for now; we can
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1138) * fix them up later.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1139) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1140) LOAD_REG_IMMEDIATE(r3, 0x9abcdef0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1141) mtspr SPRN_SSPCR,r3
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1142) mtspr SPRN_USPCR,r3
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1143) LOAD_REG_IMMEDIATE(r3, 0x12345670)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1144) mtspr SPRN_ISPCR,r3
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1145)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1146) /* Force context change */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1147) mfmsr r0
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1148) mtspr SPRN_SRR1, r0
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1149) lis r0,3f@h
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1150) ori r0,r0,3f@l
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1151) mtspr SPRN_SRR0,r0
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1152) sync
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1153) rfi
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1154)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1155) /* Invalidate original entry we used */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1156) 3:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1157) rlwinm r24,r24,0,21,19 /* clear the "valid" bit */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1158) tlbwe r24,r23,0
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1159) addi r24,0,0
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1160) tlbwe r24,r23,1
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1161) tlbwe r24,r23,2
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1162) isync /* Clear out the shadow TLB entries */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1163)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1164) #ifdef CONFIG_PPC_EARLY_DEBUG_44x
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1165) /* Add UART mapping for early debug. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1166)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1167) /* Word 0 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1168) lis r3,PPC44x_EARLY_DEBUG_VIRTADDR@h
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1169) ori r3,r3,PPC47x_TLB0_VALID | PPC47x_TLB0_TS | PPC47x_TLB0_1M
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1170)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1171) /* Word 1 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1172) lis r4,CONFIG_PPC_EARLY_DEBUG_44x_PHYSLOW@h
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1173) ori r4,r4,CONFIG_PPC_EARLY_DEBUG_44x_PHYSHIGH
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1174)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1175) /* Word 2 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1176) li r5,(PPC47x_TLB2_S_RW | PPC47x_TLB2_IMG)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1177)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1178) /* Bolted in way 0, bolt slot 5. We -hope- we don't hit the same
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1179) * congruence class as the kernel; we need to make sure of that at
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1180) * some point.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1181) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1182) lis r0,0x8d00
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1183) tlbwe r3,r0,0
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1184) tlbwe r4,r0,1
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1185) tlbwe r5,r0,2
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1186)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1187) /* Force context change */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1188) isync
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1189) #endif /* CONFIG_PPC_EARLY_DEBUG_44x */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1190)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1191) /* Establish the interrupt vector offsets */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1192) SET_IVOR(0, CriticalInput);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1193) SET_IVOR(1, MachineCheckA);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1194) SET_IVOR(2, DataStorage);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1195) SET_IVOR(3, InstructionStorage);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1196) SET_IVOR(4, ExternalInput);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1197) SET_IVOR(5, Alignment);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1198) SET_IVOR(6, Program);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1199) SET_IVOR(7, FloatingPointUnavailable);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1200) SET_IVOR(8, SystemCall);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1201) SET_IVOR(9, AuxillaryProcessorUnavailable);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1202) SET_IVOR(10, Decrementer);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1203) SET_IVOR(11, FixedIntervalTimer);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1204) SET_IVOR(12, WatchdogTimer);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1205) SET_IVOR(13, DataTLBError47x);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1206) SET_IVOR(14, InstructionTLBError47x);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1207) SET_IVOR(15, DebugCrit);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1208)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1209) /* We configure icbi to invalidate 128 bytes at a time since the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1210) * current 32-bit kernel code isn't too happy with icache != dcache
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1211) * block size. We also disable the BTAC as this can cause errors
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1212) * in some circumstances (see IBM Erratum 47).
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1213) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1214) mfspr r3,SPRN_CCR0
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1215) oris r3,r3,0x0020
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1216) ori r3,r3,0x0040
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1217) mtspr SPRN_CCR0,r3
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1218) isync
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1219)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1220) #endif /* CONFIG_PPC_47x */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1221)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1222) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1223) * Here we are back to code that is common between 44x and 47x.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1224) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1225) * We proceed to further kernel initialization and return to the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1226) * main kernel entry point.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1227) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1228) head_start_common:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1229) /* Establish the interrupt vector base */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1230) lis r4,interrupt_base@h /* IVPR only uses the high 16 bits */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1231) mtspr SPRN_IVPR,r4
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1232)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1233) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1234) * If the kernel was loaded at a non-zero 256 MB page, we need to
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1235) * mask off the most significant 4 bits to get the relative address
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1236) * from the start of physical memory
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1237) */
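/*
 * For example, assuming the usual PAGE_OFFSET of 0xc0000000: a saved
 * link register of 0x10001234 becomes (0x10001234 & 0x0fffffff) +
 * 0xc0000000 = 0xc0001234, i.e. the return address at its kernel
 * virtual mapping.
 */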
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1238) rlwinm r22,r22,0,4,31
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1239) addis r22,r22,PAGE_OFFSET@h
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1240) mtlr r22
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1241) isync
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1242) blr
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1243)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1244) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1245) * We put a few things here that have to be page-aligned. This stuff
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1246) * goes at the beginning of the data segment, which is page-aligned.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1247) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1248) .data
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1249) .align PAGE_SHIFT
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1250) .globl sdata
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1251) sdata:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1252) .globl empty_zero_page
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1253) empty_zero_page:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1254) .space PAGE_SIZE
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1255) EXPORT_SYMBOL(empty_zero_page)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1256)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1257) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1258) * To support >32-bit physical addresses, we use an 8KB pgdir.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1259) */
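/*
 * Rough arithmetic behind the 8KB figure (assuming 4KB pages): with
 * 64-bit PTEs a page of PTEs holds 512 entries and so maps 2MB, which
 * means 4GB of address space needs 2048 pgdir pointers of 4 bytes
 * each, i.e. 8KB.
 */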
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1260) .globl swapper_pg_dir
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1261) swapper_pg_dir:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1262) .space PGD_TABLE_SIZE
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1263)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1264) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1265) * Room for two PTE pointers, usually the kernel and current user pointers
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1266) * to their respective root page table.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1267) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1268) abatron_pteptrs:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1269) .space 8
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1270)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1271) #ifdef CONFIG_SMP
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1272) .align 12
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1273) temp_boot_stack:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1274) .space 1024
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1275) #endif /* CONFIG_SMP */