^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1) /* SPDX-License-Identifier: GPL-2.0 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3) * File: mca_asm.S
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4) * Purpose: assembly portion of the IA64 MCA handling
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6) * Mods by cfleck to integrate into kernel build
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8) * 2000-03-15 David Mosberger-Tang <davidm@hpl.hp.com>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9) * Added various stop bits to get a clean compile
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11) * 2000-03-29 Chuck Fleckenstein <cfleck@co.intel.com>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12) * Added code to save INIT handoff state in pt_regs format,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13) * switch to temp kstack, switch modes, jump to C INIT handler
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15) * 2002-01-04 J.Hall <jenna.s.hall@intel.com>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 16) * Before entering virtual mode code:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 17) * 1. Check for TLB CPU error
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 18) * 2. Restore current thread pointer to kr6
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 19) * 3. Move stack ptr 16 bytes to conform to C calling convention
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 20) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 21) * 2004-11-12 Russ Anderson <rja@sgi.com>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 22) * Added per cpu MCA/INIT stack save areas.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 23) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 24) * 2005-12-08 Keith Owens <kaos@sgi.com>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 25) * Use per cpu MCA/INIT stacks for all data.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 26) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 27) #include <linux/threads.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 28) #include <linux/pgtable.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 29)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 30) #include <asm/asmmacro.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 31) #include <asm/processor.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 32) #include <asm/mca_asm.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 33) #include <asm/mca.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 34)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 35) #include "entry.h"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 36)
/*
 * GET_IA64_MCA_DATA(reg): load the physical address of this cpu's
 * MCA/INIT save area into `reg`.  GET_THIS_PADDR yields the physical
 * address of the per-cpu variable ia64_mca_data; the ld8 then
 * dereferences it.  Clobbers `reg` only.
 */
#define GET_IA64_MCA_DATA(reg)						\
	GET_THIS_PADDR(reg, ia64_mca_data)				\
	;;								\
	ld8 reg=[reg]
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 41)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 42) .global ia64_do_tlb_purge
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 43) .global ia64_os_mca_dispatch
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 44) .global ia64_os_init_on_kdump
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 45) .global ia64_os_init_dispatch_monarch
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 46) .global ia64_os_init_dispatch_slave
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 47)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 48) .text
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 49) .align 16
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 50)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 51) //StartMain////////////////////////////////////////////////////////////////////
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 52)
/*
 * ia64_do_tlb_purge
 *
 * Just the TLB purge part is moved to a separate function
 * so we can re-use the code for the cpu hotplug path as well.
 * The caller must set up b1 first; we branch to b1 once the
 * tlb flush is complete.
 *
 * Runs in physical mode.  First purges all translation-cache entries
 * using the PAL-provided ptc.e loop parameters (base/count/stride from
 * this cpu's cpuinfo), then purges the tracked TR mappings: kernel
 * text, PAL code and the current kernel stack.
 *
 * In:     b1 = return address
 * Uses:   r2, r16-r22, r24, p6, p7, ar.lc
 */

ia64_do_tlb_purge:
#define O(member)	IA64_CPUINFO_##member##_OFFSET

	GET_THIS_PADDR(r2, ia64_cpu_info) // load phys addr of cpu_info into r2
	;;
	addl r17=O(PTCE_STRIDE),r2
	addl r2=O(PTCE_BASE),r2
	;;
	ld8 r18=[r2],(O(PTCE_COUNT)-O(PTCE_BASE));;	// r18=ptce_base
	ld4 r19=[r2],4		// r19=ptce_count[0]
	ld4 r21=[r17],4		// r21=ptce_stride[0]
	;;
	ld4 r20=[r2]		// r20=ptce_count[1]
	ld4 r22=[r17]		// r22=ptce_stride[1]
	mov r24=0		// r24 = outer loop counter
	;;
	adds r20=-1,r20		// br.cloop runs ar.lc+1 times, so count[1]-1
	;;
#undef O

2:
	// outer loop: p6 <- (r24 < ptce_count[0]), p7 <- done
	cmp.ltu p6,p7=r24,r19
(p7)	br.cond.dpnt.few 4f
	mov ar.lc=r20
3:
	// inner loop: purge one TC entry, then advance by stride[1]
	ptc.e r18
	;;
	add r18=r22,r18
	br.cloop.sptk.few 3b
	;;
	add r18=r21,r18		// advance by stride[0] for the next outer pass
	add r24=1,r24
	;;
	br.sptk.few 2b
4:
	srlz.i 			// srlz.i implies srlz.d
	;;

	// Now purge addresses formerly mapped by TR registers
	// 1. Purge ITR&DTR for kernel.
	movl r16=KERNEL_START
	mov r18=KERNEL_TR_PAGE_SHIFT<<2
	;;
	ptr.i r16, r18
	ptr.d r16, r18
	;;
	srlz.i
	;;
	srlz.d
	;;
	// 3. Purge ITR for PAL code.
	// (there is no step 2 -- the numbering gap is historical)
	GET_THIS_PADDR(r2, ia64_mca_pal_base)
	;;
	ld8 r16=[r2]
	mov r18=IA64_GRANULE_SHIFT<<2
	;;
	ptr.i r16,r18
	;;
	srlz.i
	;;
	// 4. Purge DTR for stack.
	mov r16=IA64_KR(CURRENT_STACK)	// granule number of the current stack
	;;
	shl r16=r16,IA64_GRANULE_SHIFT
	movl r19=PAGE_OFFSET
	;;
	add r16=r19,r16			// virtual address of the stack mapping
	mov r18=IA64_GRANULE_SHIFT<<2
	;;
	ptr.d r16,r18
	;;
	srlz.i
	;;
	// Now branch away to caller.
	br.sptk.many b1
	;;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 136)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 137) //EndMain//////////////////////////////////////////////////////////////////////
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 138)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 139) //StartMain////////////////////////////////////////////////////////////////////
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 140)
/*
 * ia64_os_mca_dispatch
 *
 * SAL to OS entry point for MCA, registered via ia64_mca_init.
 * Entered in physical mode with the SAL/PAL handoff state in registers
 * (see the ia64_state_save stub description below).  Saves the state
 * that is not in minstate, then checks the processor state parameter:
 * if the MCA involved the TLB, purge and reload TC/TR before trusting
 * any translations.
 */
ia64_os_mca_dispatch:
	mov r3=IA64_MCA_CPU_MCA_STACK_OFFSET	// use the MCA stack
	LOAD_PHYSICAL(p0,r2,1f)			// return address
	mov r19=1				// All MCA events are treated as monarch (for now)
	br.sptk ia64_state_save			// save the state that is not in minstate
1:

	GET_IA64_MCA_DATA(r2)
	// Using MCA stack, struct ia64_sal_os_state, variable proc_state_param
	;;
	add r3=IA64_MCA_CPU_MCA_STACK_OFFSET+MCA_SOS_OFFSET+SOS(PROC_STATE_PARAM), r2
	;;
	ld8 r18=[r3]				// Get processor state parameter on existing PALE_CHECK.
	;;
	// PSP bit 60 set => TLB error (see the 2002-01-04 header note);
	// only then is the purge/reload below needed.
	tbit.nz p6,p7=r18,60
(p7)	br.spnt done_tlb_purge_and_reload
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 158) // The following code purges TC and TR entries. Then reload all TC entries.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 159) // Purge percpu data TC entries.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 160) begin_tlb_purge_and_reload:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 161) movl r18=ia64_reload_tr;;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 162) LOAD_PHYSICAL(p0,r18,ia64_reload_tr);;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 163) mov b1=r18;;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 164) br.sptk.many ia64_do_tlb_purge;;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 165)
ia64_reload_tr:
	// Finally reload the TR registers.
	// Each insertion sets cr.itir (page size) and cr.ifa (virtual
	// address) before the itr; the numbering mirrors the purge
	// sequence in ia64_do_tlb_purge.
	// 1. Reload DTR/ITR registers for kernel.
	mov r18=KERNEL_TR_PAGE_SHIFT<<2
	movl r17=KERNEL_START
	;;
	mov cr.itir=r18
	mov cr.ifa=r17
	mov r16=IA64_TR_KERNEL
	mov r19=ip
	movl r18=PAGE_KERNEL
	;;
	dep r17=0,r19,0, KERNEL_TR_PAGE_SHIFT	// r17 = ip rounded down to the kernel TR page
	;;
	or r18=r17,r18				// PTE = page frame | PAGE_KERNEL attributes
	;;
	itr.i itr[r16]=r18
	;;
	itr.d dtr[r16]=r18
	;;
	srlz.i
	srlz.d
	;;
	// 3. Reload ITR for PAL code.
	// (no step 2 -- numbering kept in sync with the purge path)
	GET_THIS_PADDR(r2, ia64_mca_pal_pte)
	;;
	ld8 r18=[r2]			// load PAL PTE
	;;
	GET_THIS_PADDR(r2, ia64_mca_pal_base)
	;;
	ld8 r16=[r2]			// load PAL vaddr
	mov r19=IA64_GRANULE_SHIFT<<2
	;;
	mov cr.itir=r19
	mov cr.ifa=r16
	mov r20=IA64_TR_PALCODE
	;;
	itr.i itr[r20]=r18
	;;
	srlz.i
	;;
	// 4. Reload DTR for stack.
	mov r16=IA64_KR(CURRENT_STACK)	// granule number of the current stack
	;;
	shl r16=r16,IA64_GRANULE_SHIFT
	movl r19=PAGE_OFFSET
	;;
	add r18=r19,r16			// r18 = virtual address of the stack
	movl r20=PAGE_KERNEL
	;;
	add r16=r20,r16			// r16 = PTE (phys frame | PAGE_KERNEL)
	mov r19=IA64_GRANULE_SHIFT<<2
	;;
	mov cr.itir=r19
	mov cr.ifa=r18
	mov r20=IA64_TR_CURRENT_STACK
	;;
	itr.d dtr[r20]=r16
	GET_THIS_PADDR(r2, ia64_mca_tr_reload)	// flag for the C handler that
	mov r18 = 1				// the TRs have been reloaded
	;;
	srlz.d
	;;
	st8 [r2] =r18
	;;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 231)
done_tlb_purge_and_reload:

	// switch to per cpu MCA stack
	mov r3=IA64_MCA_CPU_MCA_STACK_OFFSET	// use the MCA stack
	LOAD_PHYSICAL(p0,r2,1f)			// return address
	br.sptk ia64_new_stack
1:

	// everything saved, now we can set the kernel registers
	mov r3=IA64_MCA_CPU_MCA_STACK_OFFSET	// use the MCA stack
	LOAD_PHYSICAL(p0,r2,1f)			// return address
	br.sptk ia64_set_kernel_registers
1:

	// This must be done in physical mode
	GET_IA64_MCA_DATA(r2)
	;;
	mov r7=r2		// stash PA of the save area; r7 survives the
				// mode switch and is converted below

	// Enter virtual mode from physical mode
	VIRTUAL_MODE_ENTER(r2, r3, ia64_os_mca_virtual_begin, r4)

	// This code returns to SAL via SOS r2, in general SAL has no unwind
	// data.  To get a clean termination when backtracing the C MCA/INIT
	// handler, set a dummy return address of 0 in this routine.  That
	// requires that ia64_os_mca_virtual_begin be a global function.
ENTRY(ia64_os_mca_virtual_begin)
	.prologue
	.save rp,r0		// dummy rp of 0 terminates the unwind backtrace
	.body

	mov ar.rsc=3		// set eager mode for C handler
	mov r2=r7		// see GET_IA64_MCA_DATA above
	;;

	// Call virtual mode handler:
	// out0 = pt_regs, out1 = switch_stack, out2 = sal_os_state
	// (all located on the per cpu MCA stack)
	alloc r14=ar.pfs,0,0,3,0
	;;
	DATA_PA_TO_VA(r2,r7)
	;;
	add out0=IA64_MCA_CPU_MCA_STACK_OFFSET+MCA_PT_REGS_OFFSET, r2
	add out1=IA64_MCA_CPU_MCA_STACK_OFFSET+MCA_SWITCH_STACK_OFFSET, r2
	add out2=IA64_MCA_CPU_MCA_STACK_OFFSET+MCA_SOS_OFFSET, r2
	br.call.sptk.many b0=ia64_mca_handler

	// Revert back to physical mode before going back to SAL
	PHYSICAL_MODE_ENTER(r2, r3, ia64_os_mca_virtual_end, r4)
ia64_os_mca_virtual_end:

END(ia64_os_mca_virtual_begin)

	// switch back to previous stack
	alloc r14=ar.pfs,0,0,0,0		// remove the MCA handler frame
	mov r3=IA64_MCA_CPU_MCA_STACK_OFFSET	// use the MCA stack
	LOAD_PHYSICAL(p0,r2,1f)			// return address
	br.sptk ia64_old_stack
1:

	mov r3=IA64_MCA_CPU_MCA_STACK_OFFSET	// use the MCA stack
	LOAD_PHYSICAL(p0,r2,1f)			// return address
	br.sptk ia64_state_restore		// restore the SAL state
1:

	mov b0=r12		// SAL_CHECK return address

	br b0
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 298)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 299) //EndMain//////////////////////////////////////////////////////////////////////
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 300)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 301) //StartMain////////////////////////////////////////////////////////////////////
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 302)
//
// NOP init handler for kdump.  In a panic situation we may receive INIT
// during the transition to the new kernel.  Since registers are
// re-initialized on leaving the current kernel, the monarch/slave
// handlers of the current kernel can no longer be called safely in
// virtual mode.
// We could unregister these init handlers from SAL, but then an INIT
// would result in a warmboot by SAL and the crashdump could not be
// retrieved.  Therefore this NOP function is registered with SAL, to
// prevent entering virtual mode and the resulting warmboot by SAL.
//
ia64_os_init_on_kdump:
	mov r8=r0		// IA64_INIT_RESUME
	mov r9=r10		// SAL_GP
	mov r22=r17		// *minstate
	;;
	mov r10=r0		// return to same context
	mov b0=r12		// SAL_CHECK return address
	br b0
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 321)
//
// SAL to OS entry point for INIT on all processors.  This has been defined for
// registration purposes with SAL as a part of ia64_mca_init.  Monarch and
// slave INIT have identical processing, except for the value of the
// sos->monarch flag in r19.
//

ia64_os_init_dispatch_monarch:
	mov r19=1		// monarch flag; Bow, bow, ye lower middle classes!
	br.sptk ia64_os_init_dispatch
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 332)
ia64_os_init_dispatch_slave:
	mov r19=0		// slave: sos->monarch = 0  <igor>yeth, mathter</igor>
	// falls through into the common dispatch path

// Common INIT dispatch: same structure as ia64_os_mca_dispatch but on the
// per cpu INIT stack and without the TLB purge/reload step.
ia64_os_init_dispatch:

	mov r3=IA64_MCA_CPU_INIT_STACK_OFFSET	// use the INIT stack
	LOAD_PHYSICAL(p0,r2,1f)			// return address
	br.sptk ia64_state_save			// save the state that is not in minstate
1:

	// switch to per cpu INIT stack
	mov r3=IA64_MCA_CPU_INIT_STACK_OFFSET	// use the INIT stack
	LOAD_PHYSICAL(p0,r2,1f)			// return address
	br.sptk ia64_new_stack
1:

	// everything saved, now we can set the kernel registers
	mov r3=IA64_MCA_CPU_INIT_STACK_OFFSET	// use the INIT stack
	LOAD_PHYSICAL(p0,r2,1f)			// return address
	br.sptk ia64_set_kernel_registers
1:

	// This must be done in physical mode
	GET_IA64_MCA_DATA(r2)
	;;
	mov r7=r2		// stash PA of the save area; converted below

	// Enter virtual mode from physical mode
	VIRTUAL_MODE_ENTER(r2, r3, ia64_os_init_virtual_begin, r4)

	// This code returns to SAL via SOS r2, in general SAL has no unwind
	// data.  To get a clean termination when backtracing the C MCA/INIT
	// handler, set a dummy return address of 0 in this routine.  That
	// requires that ia64_os_init_virtual_begin be a global function.
ENTRY(ia64_os_init_virtual_begin)
	.prologue
	.save rp,r0		// dummy rp of 0 terminates the unwind backtrace
	.body

	mov ar.rsc=3		// set eager mode for C handler
	mov r2=r7		// see GET_IA64_MCA_DATA above
	;;

	// Call virtual mode handler:
	// out0 = pt_regs, out1 = switch_stack, out2 = sal_os_state
	// (all located on the per cpu INIT stack)
	alloc r14=ar.pfs,0,0,3,0
	;;
	DATA_PA_TO_VA(r2,r7)
	;;
	add out0=IA64_MCA_CPU_INIT_STACK_OFFSET+MCA_PT_REGS_OFFSET, r2
	add out1=IA64_MCA_CPU_INIT_STACK_OFFSET+MCA_SWITCH_STACK_OFFSET, r2
	add out2=IA64_MCA_CPU_INIT_STACK_OFFSET+MCA_SOS_OFFSET, r2
	br.call.sptk.many b0=ia64_init_handler

	// Revert back to physical mode before going back to SAL
	PHYSICAL_MODE_ENTER(r2, r3, ia64_os_init_virtual_end, r4)
ia64_os_init_virtual_end:

END(ia64_os_init_virtual_begin)

	mov r3=IA64_MCA_CPU_INIT_STACK_OFFSET	// use the INIT stack
	LOAD_PHYSICAL(p0,r2,1f)			// return address
	br.sptk ia64_state_restore		// restore the SAL state
1:

	// switch back to previous stack
	alloc r14=ar.pfs,0,0,0,0		// remove the INIT handler frame
	mov r3=IA64_MCA_CPU_INIT_STACK_OFFSET	// use the INIT stack
	LOAD_PHYSICAL(p0,r2,1f)			// return address
	br.sptk ia64_old_stack
1:

	mov b0=r12				// SAL_CHECK return address
	br b0
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 406)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 407) //EndMain//////////////////////////////////////////////////////////////////////
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 408)
// common defines for the stubs
#define ms	r4	/* second sos store pointer (sos+8 in ia64_state_save) */
#define regs	r5	/* first sos store pointer (start of sos) */
#define temp1	r2	/* careful, it overlaps with input registers */
#define temp2	r3	/* careful, it overlaps with input registers */
#define temp3	r7
#define temp4	r14
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 416)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 417)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 418) //++
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 419) // Name:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 420) // ia64_state_save()
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 421) //
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 422) // Stub Description:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 423) //
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 424) // Save the state that is not in minstate. This is sensitive to the layout of
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 425) // struct ia64_sal_os_state in mca.h.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 426) //
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 427) // r2 contains the return address, r3 contains either
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 428) // IA64_MCA_CPU_MCA_STACK_OFFSET or IA64_MCA_CPU_INIT_STACK_OFFSET.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 429) //
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 430) // The OS to SAL section of struct ia64_sal_os_state is set to a default
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 431) // value of cold boot (MCA) or warm boot (INIT) and return to the same
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 432) // context. ia64_sal_os_state is also used to hold some registers that
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 433) // need to be saved and restored across the stack switches.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 434) //
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 435) // Most input registers to this stub come from PAL/SAL
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 436) // r1 os gp, physical
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 437) // r8 pal_proc entry point
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 438) // r9 sal_proc entry point
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 439) // r10 sal gp
// r11 MCA - rendezvous state, INIT - reason code
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 441) // r12 sal return address
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 442) // r17 pal min_state
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 443) // r18 processor state parameter
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 444) // r19 monarch flag, set by the caller of this routine
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 445) //
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 446) // In addition to the SAL to OS state, this routine saves all the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 447) // registers that appear in struct pt_regs and struct switch_stack,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 448) // excluding those that are already in the PAL minstate area. This
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 449) // results in a partial pt_regs and switch_stack, the C code copies the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 450) // remaining registers from PAL minstate to pt_regs and switch_stack. The
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 451) // resulting structures contain all the state of the original process when
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 452) // MCA/INIT occurred.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 453) //
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 454) //--
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 455)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 456) ia64_state_save:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 457) add regs=MCA_SOS_OFFSET, r3
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 458) add ms=MCA_SOS_OFFSET+8, r3
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 459) mov b0=r2 // save return address
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 460) cmp.eq p1,p2=IA64_MCA_CPU_MCA_STACK_OFFSET, r3
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 461) ;;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 462) GET_IA64_MCA_DATA(temp2)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 463) ;;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 464) add temp1=temp2, regs // struct ia64_sal_os_state on MCA or INIT stack
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 465) add temp2=temp2, ms // struct ia64_sal_os_state+8 on MCA or INIT stack
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 466) ;;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 467) mov regs=temp1 // save the start of sos
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 468) st8 [temp1]=r1,16 // os_gp
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 469) st8 [temp2]=r8,16 // pal_proc
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 470) ;;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 471) st8 [temp1]=r9,16 // sal_proc
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 472) st8 [temp2]=r11,16 // rv_rc
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 473) mov r11=cr.iipa
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 474) ;;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 475) st8 [temp1]=r18 // proc_state_param
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 476) st8 [temp2]=r19 // monarch
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 477) mov r6=IA64_KR(CURRENT)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 478) add temp1=SOS(SAL_RA), regs
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 479) add temp2=SOS(SAL_GP), regs
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 480) ;;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 481) st8 [temp1]=r12,16 // sal_ra
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 482) st8 [temp2]=r10,16 // sal_gp
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 483) mov r12=cr.isr
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 484) ;;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 485) st8 [temp1]=r17,16 // pal_min_state
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 486) st8 [temp2]=r6,16 // prev_IA64_KR_CURRENT
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 487) mov r6=IA64_KR(CURRENT_STACK)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 488) ;;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 489) st8 [temp1]=r6,16 // prev_IA64_KR_CURRENT_STACK
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 490) st8 [temp2]=r0,16 // prev_task, starts off as NULL
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 491) mov r6=cr.ifa
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 492) ;;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 493) st8 [temp1]=r12,16 // cr.isr
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 494) st8 [temp2]=r6,16 // cr.ifa
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 495) mov r12=cr.itir
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 496) ;;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 497) st8 [temp1]=r12,16 // cr.itir
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 498) st8 [temp2]=r11,16 // cr.iipa
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 499) mov r12=cr.iim
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 500) ;;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 501) st8 [temp1]=r12 // cr.iim
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 502) (p1) mov r12=IA64_MCA_COLD_BOOT
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 503) (p2) mov r12=IA64_INIT_WARM_BOOT
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 504) mov r6=cr.iha
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 505) add temp1=SOS(OS_STATUS), regs
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 506) ;;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 507) st8 [temp2]=r6 // cr.iha
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 508) add temp2=SOS(CONTEXT), regs
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 509) st8 [temp1]=r12 // os_status, default is cold boot
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 510) mov r6=IA64_MCA_SAME_CONTEXT
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 511) ;;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 512) st8 [temp2]=r6 // context, default is same context
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 513)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 514) // Save the pt_regs data that is not in minstate. The previous code
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 515) // left regs at sos.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 516) add regs=MCA_PT_REGS_OFFSET-MCA_SOS_OFFSET, regs
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 517) ;;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 518) add temp1=PT(B6), regs
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 519) mov temp3=b6
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 520) mov temp4=b7
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 521) add temp2=PT(B7), regs
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 522) ;;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 523) st8 [temp1]=temp3,PT(AR_CSD)-PT(B6) // save b6
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 524) st8 [temp2]=temp4,PT(AR_SSD)-PT(B7) // save b7
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 525) mov temp3=ar.csd
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 526) mov temp4=ar.ssd
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 527) cover // must be last in group
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 528) ;;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 529) st8 [temp1]=temp3,PT(AR_UNAT)-PT(AR_CSD) // save ar.csd
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 530) st8 [temp2]=temp4,PT(AR_PFS)-PT(AR_SSD) // save ar.ssd
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 531) mov temp3=ar.unat
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 532) mov temp4=ar.pfs
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 533) ;;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 534) st8 [temp1]=temp3,PT(AR_RNAT)-PT(AR_UNAT) // save ar.unat
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 535) st8 [temp2]=temp4,PT(AR_BSPSTORE)-PT(AR_PFS) // save ar.pfs
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 536) mov temp3=ar.rnat
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 537) mov temp4=ar.bspstore
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 538) ;;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 539) st8 [temp1]=temp3,PT(LOADRS)-PT(AR_RNAT) // save ar.rnat
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 540) st8 [temp2]=temp4,PT(AR_FPSR)-PT(AR_BSPSTORE) // save ar.bspstore
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 541) mov temp3=ar.bsp
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 542) ;;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 543) sub temp3=temp3, temp4 // ar.bsp - ar.bspstore
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 544) mov temp4=ar.fpsr
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 545) ;;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 546) shl temp3=temp3,16 // compute ar.rsc to be used for "loadrs"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 547) ;;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 548) st8 [temp1]=temp3,PT(AR_CCV)-PT(LOADRS) // save loadrs
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 549) st8 [temp2]=temp4,PT(F6)-PT(AR_FPSR) // save ar.fpsr
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 550) mov temp3=ar.ccv
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 551) ;;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 552) st8 [temp1]=temp3,PT(F7)-PT(AR_CCV) // save ar.ccv
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 553) stf.spill [temp2]=f6,PT(F8)-PT(F6)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 554) ;;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 555) stf.spill [temp1]=f7,PT(F9)-PT(F7)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 556) stf.spill [temp2]=f8,PT(F10)-PT(F8)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 557) ;;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 558) stf.spill [temp1]=f9,PT(F11)-PT(F9)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 559) stf.spill [temp2]=f10
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 560) ;;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 561) stf.spill [temp1]=f11
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 562)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 563) // Save the switch_stack data that is not in minstate nor pt_regs. The
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 564) // previous code left regs at pt_regs.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 565) add regs=MCA_SWITCH_STACK_OFFSET-MCA_PT_REGS_OFFSET, regs
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 566) ;;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 567) add temp1=SW(F2), regs
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 568) add temp2=SW(F3), regs
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 569) ;;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 570) stf.spill [temp1]=f2,32
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 571) stf.spill [temp2]=f3,32
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 572) ;;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 573) stf.spill [temp1]=f4,32
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 574) stf.spill [temp2]=f5,32
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 575) ;;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 576) stf.spill [temp1]=f12,32
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 577) stf.spill [temp2]=f13,32
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 578) ;;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 579) stf.spill [temp1]=f14,32
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 580) stf.spill [temp2]=f15,32
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 581) ;;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 582) stf.spill [temp1]=f16,32
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 583) stf.spill [temp2]=f17,32
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 584) ;;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 585) stf.spill [temp1]=f18,32
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 586) stf.spill [temp2]=f19,32
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 587) ;;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 588) stf.spill [temp1]=f20,32
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 589) stf.spill [temp2]=f21,32
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 590) ;;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 591) stf.spill [temp1]=f22,32
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 592) stf.spill [temp2]=f23,32
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 593) ;;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 594) stf.spill [temp1]=f24,32
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 595) stf.spill [temp2]=f25,32
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 596) ;;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 597) stf.spill [temp1]=f26,32
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 598) stf.spill [temp2]=f27,32
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 599) ;;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 600) stf.spill [temp1]=f28,32
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 601) stf.spill [temp2]=f29,32
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 602) ;;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 603) stf.spill [temp1]=f30,SW(B2)-SW(F30)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 604) stf.spill [temp2]=f31,SW(B3)-SW(F31)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 605) mov temp3=b2
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 606) mov temp4=b3
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 607) ;;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 608) st8 [temp1]=temp3,16 // save b2
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 609) st8 [temp2]=temp4,16 // save b3
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 610) mov temp3=b4
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 611) mov temp4=b5
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 612) ;;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 613) st8 [temp1]=temp3,SW(AR_LC)-SW(B4) // save b4
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 614) st8 [temp2]=temp4 // save b5
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 615) mov temp3=ar.lc
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 616) ;;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 617) st8 [temp1]=temp3 // save ar.lc
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 618)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 619) // FIXME: Some proms are incorrectly accessing the minstate area as
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 620) // cached data. The C code uses region 6, uncached virtual. Ensure
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 621) // that there is no cache data lying around for the first 1K of the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 622) // minstate area.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 623) // Remove this code in September 2006, that gives platforms a year to
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 624) // fix their proms and get their customers updated.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 625)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 626) add r1=32*1,r17
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 627) add r2=32*2,r17
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 628) add r3=32*3,r17
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 629) add r4=32*4,r17
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 630) add r5=32*5,r17
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 631) add r6=32*6,r17
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 632) add r7=32*7,r17
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 633) ;;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 634) fc r17
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 635) fc r1
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 636) fc r2
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 637) fc r3
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 638) fc r4
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 639) fc r5
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 640) fc r6
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 641) fc r7
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 642) add r17=32*8,r17
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 643) add r1=32*8,r1
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 644) add r2=32*8,r2
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 645) add r3=32*8,r3
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 646) add r4=32*8,r4
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 647) add r5=32*8,r5
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 648) add r6=32*8,r6
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 649) add r7=32*8,r7
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 650) ;;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 651) fc r17
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 652) fc r1
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 653) fc r2
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 654) fc r3
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 655) fc r4
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 656) fc r5
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 657) fc r6
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 658) fc r7
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 659) add r17=32*8,r17
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 660) add r1=32*8,r1
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 661) add r2=32*8,r2
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 662) add r3=32*8,r3
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 663) add r4=32*8,r4
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 664) add r5=32*8,r5
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 665) add r6=32*8,r6
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 666) add r7=32*8,r7
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 667) ;;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 668) fc r17
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 669) fc r1
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 670) fc r2
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 671) fc r3
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 672) fc r4
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 673) fc r5
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 674) fc r6
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 675) fc r7
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 676) add r17=32*8,r17
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 677) add r1=32*8,r1
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 678) add r2=32*8,r2
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 679) add r3=32*8,r3
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 680) add r4=32*8,r4
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 681) add r5=32*8,r5
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 682) add r6=32*8,r6
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 683) add r7=32*8,r7
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 684) ;;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 685) fc r17
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 686) fc r1
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 687) fc r2
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 688) fc r3
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 689) fc r4
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 690) fc r5
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 691) fc r6
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 692) fc r7
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 693)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 694) br.sptk b0
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 695)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 696) //EndStub//////////////////////////////////////////////////////////////////////
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 697)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 698)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 699) //++
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 700) // Name:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 701) // ia64_state_restore()
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 702) //
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 703) // Stub Description:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 704) //
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 705) // Restore the SAL/OS state. This is sensitive to the layout of struct
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 706) // ia64_sal_os_state in mca.h.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 707) //
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 708) // r2 contains the return address, r3 contains either
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 709) // IA64_MCA_CPU_MCA_STACK_OFFSET or IA64_MCA_CPU_INIT_STACK_OFFSET.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 710) //
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 711) // In addition to the SAL to OS state, this routine restores all the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 712) // registers that appear in struct pt_regs and struct switch_stack,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 713) // excluding those in the PAL minstate area.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 714) //
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 715) //--
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 716)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 717) ia64_state_restore:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 718) // Restore the switch_stack data that is not in minstate nor pt_regs.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 719) add regs=MCA_SWITCH_STACK_OFFSET, r3
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 720) mov b0=r2 // save return address
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 721) ;;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 722) GET_IA64_MCA_DATA(temp2)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 723) ;;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 724) add regs=temp2, regs
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 725) ;;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 726) add temp1=SW(F2), regs
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 727) add temp2=SW(F3), regs
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 728) ;;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 729) ldf.fill f2=[temp1],32
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 730) ldf.fill f3=[temp2],32
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 731) ;;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 732) ldf.fill f4=[temp1],32
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 733) ldf.fill f5=[temp2],32
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 734) ;;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 735) ldf.fill f12=[temp1],32
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 736) ldf.fill f13=[temp2],32
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 737) ;;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 738) ldf.fill f14=[temp1],32
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 739) ldf.fill f15=[temp2],32
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 740) ;;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 741) ldf.fill f16=[temp1],32
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 742) ldf.fill f17=[temp2],32
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 743) ;;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 744) ldf.fill f18=[temp1],32
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 745) ldf.fill f19=[temp2],32
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 746) ;;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 747) ldf.fill f20=[temp1],32
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 748) ldf.fill f21=[temp2],32
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 749) ;;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 750) ldf.fill f22=[temp1],32
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 751) ldf.fill f23=[temp2],32
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 752) ;;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 753) ldf.fill f24=[temp1],32
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 754) ldf.fill f25=[temp2],32
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 755) ;;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 756) ldf.fill f26=[temp1],32
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 757) ldf.fill f27=[temp2],32
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 758) ;;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 759) ldf.fill f28=[temp1],32
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 760) ldf.fill f29=[temp2],32
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 761) ;;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 762) ldf.fill f30=[temp1],SW(B2)-SW(F30)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 763) ldf.fill f31=[temp2],SW(B3)-SW(F31)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 764) ;;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 765) ld8 temp3=[temp1],16 // restore b2
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 766) ld8 temp4=[temp2],16 // restore b3
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 767) ;;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 768) mov b2=temp3
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 769) mov b3=temp4
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 770) ld8 temp3=[temp1],SW(AR_LC)-SW(B4) // restore b4
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 771) ld8 temp4=[temp2] // restore b5
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 772) ;;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 773) mov b4=temp3
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 774) mov b5=temp4
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 775) ld8 temp3=[temp1] // restore ar.lc
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 776) ;;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 777) mov ar.lc=temp3
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 778)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 779) // Restore the pt_regs data that is not in minstate. The previous code
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 780) // left regs at switch_stack.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 781) add regs=MCA_PT_REGS_OFFSET-MCA_SWITCH_STACK_OFFSET, regs
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 782) ;;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 783) add temp1=PT(B6), regs
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 784) add temp2=PT(B7), regs
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 785) ;;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 786) ld8 temp3=[temp1],PT(AR_CSD)-PT(B6) // restore b6
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 787) ld8 temp4=[temp2],PT(AR_SSD)-PT(B7) // restore b7
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 788) ;;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 789) mov b6=temp3
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 790) mov b7=temp4
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 791) ld8 temp3=[temp1],PT(AR_UNAT)-PT(AR_CSD) // restore ar.csd
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 792) ld8 temp4=[temp2],PT(AR_PFS)-PT(AR_SSD) // restore ar.ssd
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 793) ;;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 794) mov ar.csd=temp3
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 795) mov ar.ssd=temp4
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 796) ld8 temp3=[temp1] // restore ar.unat
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 797) add temp1=PT(AR_CCV)-PT(AR_UNAT), temp1
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 798) ld8 temp4=[temp2],PT(AR_FPSR)-PT(AR_PFS) // restore ar.pfs
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 799) ;;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 800) mov ar.unat=temp3
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 801) mov ar.pfs=temp4
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 802) // ar.rnat, ar.bspstore, loadrs are restore in ia64_old_stack.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 803) ld8 temp3=[temp1],PT(F6)-PT(AR_CCV) // restore ar.ccv
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 804) ld8 temp4=[temp2],PT(F7)-PT(AR_FPSR) // restore ar.fpsr
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 805) ;;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 806) mov ar.ccv=temp3
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 807) mov ar.fpsr=temp4
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 808) ldf.fill f6=[temp1],PT(F8)-PT(F6)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 809) ldf.fill f7=[temp2],PT(F9)-PT(F7)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 810) ;;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 811) ldf.fill f8=[temp1],PT(F10)-PT(F8)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 812) ldf.fill f9=[temp2],PT(F11)-PT(F9)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 813) ;;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 814) ldf.fill f10=[temp1]
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 815) ldf.fill f11=[temp2]
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 816)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 817) // Restore the SAL to OS state. The previous code left regs at pt_regs.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 818) add regs=MCA_SOS_OFFSET-MCA_PT_REGS_OFFSET, regs
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 819) ;;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 820) add temp1=SOS(SAL_RA), regs
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 821) add temp2=SOS(SAL_GP), regs
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 822) ;;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 823) ld8 r12=[temp1],16 // sal_ra
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 824) ld8 r9=[temp2],16 // sal_gp
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 825) ;;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 826) ld8 r22=[temp1],16 // pal_min_state, virtual
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 827) ld8 r13=[temp2],16 // prev_IA64_KR_CURRENT
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 828) ;;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 829) ld8 r16=[temp1],16 // prev_IA64_KR_CURRENT_STACK
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 830) ld8 r20=[temp2],16 // prev_task
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 831) ;;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 832) ld8 temp3=[temp1],16 // cr.isr
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 833) ld8 temp4=[temp2],16 // cr.ifa
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 834) ;;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 835) mov cr.isr=temp3
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 836) mov cr.ifa=temp4
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 837) ld8 temp3=[temp1],16 // cr.itir
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 838) ld8 temp4=[temp2],16 // cr.iipa
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 839) ;;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 840) mov cr.itir=temp3
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 841) mov cr.iipa=temp4
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 842) ld8 temp3=[temp1] // cr.iim
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 843) ld8 temp4=[temp2] // cr.iha
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 844) add temp1=SOS(OS_STATUS), regs
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 845) add temp2=SOS(CONTEXT), regs
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 846) ;;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 847) mov cr.iim=temp3
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 848) mov cr.iha=temp4
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 849) dep r22=0,r22,62,1 // pal_min_state, physical, uncached
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 850) mov IA64_KR(CURRENT)=r13
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 851) ld8 r8=[temp1] // os_status
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 852) ld8 r10=[temp2] // context
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 853)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 854) /* Wire IA64_TR_CURRENT_STACK to the stack that we are resuming to. To
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 855) * avoid any dependencies on the algorithm in ia64_switch_to(), just
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 856) * purge any existing CURRENT_STACK mapping and insert the new one.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 857) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 858) * r16 contains prev_IA64_KR_CURRENT_STACK, r13 contains
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 859) * prev_IA64_KR_CURRENT, these values may have been changed by the C
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 860) * code. Do not use r8, r9, r10, r22, they contain values ready for
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 861) * the return to SAL.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 862) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 863)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 864) mov r15=IA64_KR(CURRENT_STACK) // physical granule mapped by IA64_TR_CURRENT_STACK
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 865) ;;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 866) shl r15=r15,IA64_GRANULE_SHIFT
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 867) ;;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 868) dep r15=-1,r15,61,3 // virtual granule
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 869) mov r18=IA64_GRANULE_SHIFT<<2 // for cr.itir.ps
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 870) ;;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 871) ptr.d r15,r18
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 872) ;;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 873) srlz.d
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 874)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 875) extr.u r19=r13,61,3 // r13 = prev_IA64_KR_CURRENT
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 876) shl r20=r16,IA64_GRANULE_SHIFT // r16 = prev_IA64_KR_CURRENT_STACK
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 877) movl r21=PAGE_KERNEL // page properties
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 878) ;;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 879) mov IA64_KR(CURRENT_STACK)=r16
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 880) cmp.ne p6,p0=RGN_KERNEL,r19 // new stack is in the kernel region?
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 881) or r21=r20,r21 // construct PA | page properties
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 882) (p6) br.spnt 1f // the dreaded cpu 0 idle task in region 5:(
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 883) ;;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 884) mov cr.itir=r18
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 885) mov cr.ifa=r13
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 886) mov r20=IA64_TR_CURRENT_STACK
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 887) ;;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 888) itr.d dtr[r20]=r21
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 889) ;;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 890) srlz.d
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 891) 1:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 892)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 893) br.sptk b0
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 894)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 895) //EndStub//////////////////////////////////////////////////////////////////////
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 896)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 897)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 898) //++
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 899) // Name:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 900) // ia64_new_stack()
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 901) //
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 902) // Stub Description:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 903) //
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 904) // Switch to the MCA/INIT stack.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 905) //
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 906) // r2 contains the return address, r3 contains either
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 907) // IA64_MCA_CPU_MCA_STACK_OFFSET or IA64_MCA_CPU_INIT_STACK_OFFSET.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 908) //
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 909) // On entry RBS is still on the original stack, this routine switches RBS
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 910) // to use the MCA/INIT stack.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 911) //
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 912) // On entry, sos->pal_min_state is physical, on exit it is virtual.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 913) //
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 914) //--
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 915)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 916) ia64_new_stack:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 917) add regs=MCA_PT_REGS_OFFSET, r3
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 918) add temp2=MCA_SOS_OFFSET+SOS(PAL_MIN_STATE), r3
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 919) mov b0=r2 // save return address
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 920) GET_IA64_MCA_DATA(temp1)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 921) invala
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 922) ;;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 923) add temp2=temp2, temp1 // struct ia64_sal_os_state.pal_min_state on MCA or INIT stack
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 924) add regs=regs, temp1 // struct pt_regs on MCA or INIT stack
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 925) ;;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 926) // Address of minstate area provided by PAL is physical, uncacheable.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 927) // Convert to Linux virtual address in region 6 for C code.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 928) ld8 ms=[temp2] // pal_min_state, physical
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 929) ;;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 930) dep temp1=-1,ms,62,2 // set region 6
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 931) mov temp3=IA64_RBS_OFFSET-MCA_PT_REGS_OFFSET
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 932) ;;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 933) st8 [temp2]=temp1 // pal_min_state, virtual
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 934)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 935) add temp4=temp3, regs // start of bspstore on new stack
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 936) ;;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 937) mov ar.bspstore=temp4 // switch RBS to MCA/INIT stack
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 938) ;;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 939) flushrs // must be first in group
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 940) br.sptk b0
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 941)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 942) //EndStub//////////////////////////////////////////////////////////////////////
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 943)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 944)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 945) //++
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 946) // Name:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 947) // ia64_old_stack()
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 948) //
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 949) // Stub Description:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 950) //
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 951) // Switch to the old stack.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 952) //
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 953) // r2 contains the return address, r3 contains either
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 954) // IA64_MCA_CPU_MCA_STACK_OFFSET or IA64_MCA_CPU_INIT_STACK_OFFSET.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 955) //
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 956) // On entry, pal_min_state is virtual, on exit it is physical.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 957) //
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 958) // On entry RBS is on the MCA/INIT stack, this routine switches RBS
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 959) // back to the previous stack.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 960) //
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 961) // The psr is set to all zeroes. SAL return requires either all zeroes or
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 962) // just psr.mc set. Leaving psr.mc off allows INIT to be issued if this
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 963) // code does not perform correctly.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 964) //
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 965) // The dirty registers at the time of the event were flushed to the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 966) // MCA/INIT stack in ia64_pt_regs_save(). Restore the dirty registers
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 967) // before reverting to the previous bspstore.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 968) //--
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 969)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 970) ia64_old_stack:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 971) add regs=MCA_PT_REGS_OFFSET, r3
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 972) mov b0=r2 // save return address
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 973) GET_IA64_MCA_DATA(temp2)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 974) LOAD_PHYSICAL(p0,temp1,1f)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 975) ;;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 976) mov cr.ipsr=r0
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 977) mov cr.ifs=r0
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 978) mov cr.iip=temp1
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 979) ;;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 980) invala
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 981) rfi
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 982) 1:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 983)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 984) add regs=regs, temp2 // struct pt_regs on MCA or INIT stack
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 985) ;;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 986) add temp1=PT(LOADRS), regs
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 987) ;;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 988) ld8 temp2=[temp1],PT(AR_BSPSTORE)-PT(LOADRS) // restore loadrs
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 989) ;;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 990) ld8 temp3=[temp1],PT(AR_RNAT)-PT(AR_BSPSTORE) // restore ar.bspstore
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 991) mov ar.rsc=temp2
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 992) ;;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 993) loadrs
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 994) ld8 temp4=[temp1] // restore ar.rnat
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 995) ;;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 996) mov ar.bspstore=temp3 // back to old stack
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 997) ;;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 998) mov ar.rnat=temp4
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 999) ;;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1000)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1001) br.sptk b0
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1002)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1003) //EndStub//////////////////////////////////////////////////////////////////////
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1004)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1005)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1006) //++
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1007) // Name:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1008) // ia64_set_kernel_registers()
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1009) //
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1010) // Stub Description:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1011) //
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1012) // Set the registers that are required by the C code in order to run on an
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1013) // MCA/INIT stack.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1014) //
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1015) // r2 contains the return address, r3 contains either
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1016) // IA64_MCA_CPU_MCA_STACK_OFFSET or IA64_MCA_CPU_INIT_STACK_OFFSET.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1017) //
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1018) //--
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1019)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1020) ia64_set_kernel_registers:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1021) add temp3=MCA_SP_OFFSET, r3
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1022) mov b0=r2 // save return address
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1023) GET_IA64_MCA_DATA(temp1)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1024) ;;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1025) add r12=temp1, temp3 // kernel stack pointer on MCA/INIT stack
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1026) add r13=temp1, r3 // set current to start of MCA/INIT stack
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1027) add r20=temp1, r3 // physical start of MCA/INIT stack
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1028) ;;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1029) DATA_PA_TO_VA(r12,temp2)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1030) DATA_PA_TO_VA(r13,temp3)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1031) ;;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1032) mov IA64_KR(CURRENT)=r13
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1033)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1034) /* Wire IA64_TR_CURRENT_STACK to the MCA/INIT handler stack. To avoid
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1035) * any dependencies on the algorithm in ia64_switch_to(), just purge
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1036) * any existing CURRENT_STACK mapping and insert the new one.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1037) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1038)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1039) mov r16=IA64_KR(CURRENT_STACK) // physical granule mapped by IA64_TR_CURRENT_STACK
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1040) ;;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1041) shl r16=r16,IA64_GRANULE_SHIFT
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1042) ;;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1043) dep r16=-1,r16,61,3 // virtual granule
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1044) mov r18=IA64_GRANULE_SHIFT<<2 // for cr.itir.ps
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1045) ;;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1046) ptr.d r16,r18
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1047) ;;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1048) srlz.d
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1049)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1050) shr.u r16=r20,IA64_GRANULE_SHIFT // r20 = physical start of MCA/INIT stack
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1051) movl r21=PAGE_KERNEL // page properties
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1052) ;;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1053) mov IA64_KR(CURRENT_STACK)=r16
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1054) or r21=r20,r21 // construct PA | page properties
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1055) ;;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1056) mov cr.itir=r18
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1057) mov cr.ifa=r13
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1058) mov r20=IA64_TR_CURRENT_STACK
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1059)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1060) movl r17=FPSR_DEFAULT
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1061) ;;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1062) mov.m ar.fpsr=r17 // set ar.fpsr to kernel default value
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1063) ;;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1064) itr.d dtr[r20]=r21
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1065) ;;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1066) srlz.d
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1067)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1068) br.sptk b0
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1069)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1070) //EndStub//////////////////////////////////////////////////////////////////////
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1071)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1072) #undef ms
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1073) #undef regs
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1074) #undef temp1
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1075) #undef temp2
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1076) #undef temp3
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1077) #undef temp4
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1078)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1079)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1080) // Support function for mca.c, it is here to avoid using inline asm. Given the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1081) // address of an rnat slot, if that address is below the current ar.bspstore
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1082) // then return the contents of that slot, otherwise return the contents of
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1083) // ar.rnat.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1084) GLOBAL_ENTRY(ia64_get_rnat)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1085) alloc r14=ar.pfs,1,0,0,0
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1086) mov ar.rsc=0
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1087) ;;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1088) mov r14=ar.bspstore
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1089) ;;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1090) cmp.lt p6,p7=in0,r14
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1091) ;;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1092) (p6) ld8 r8=[in0]
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1093) (p7) mov r8=ar.rnat
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1094) mov ar.rsc=3
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1095) br.ret.sptk.many rp
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1096) END(ia64_get_rnat)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1097)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1098)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1099) // void ia64_set_psr_mc(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1100) //
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1101) // Set psr.mc bit to mask MCA/INIT.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1102) GLOBAL_ENTRY(ia64_set_psr_mc)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1103) rsm psr.i | psr.ic // disable interrupts
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1104) ;;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1105) srlz.d
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1106) ;;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1107) mov r14 = psr // get psr{36:35,31:0}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1108) movl r15 = 1f
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1109) ;;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1110) dep r14 = -1, r14, PSR_MC, 1 // set psr.mc
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1111) ;;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1112) dep r14 = -1, r14, PSR_IC, 1 // set psr.ic
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1113) ;;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1114) dep r14 = -1, r14, PSR_BN, 1 // keep bank1 in use
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1115) ;;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1116) mov cr.ipsr = r14
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1117) mov cr.ifs = r0
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1118) mov cr.iip = r15
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1119) ;;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1120) rfi
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1121) 1:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1122) br.ret.sptk.many rp
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1123) END(ia64_set_psr_mc)