^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1) /* SPDX-License-Identifier: GPL-2.0 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2) /* We need to carefully read the error status, ACK the errors,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3) * prevent recursive traps, and pass the information on to C
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4) * code for logging.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6) * We pass the AFAR in as-is, and we encode the status
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7) * information as described in asm-sparc64/sfafsr.h
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9) .type __spitfire_access_error,#function
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10) __spitfire_access_error:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11) /* Disable ESTATE error reporting so that we do not take
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12) * recursive traps and RED state the processor.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14) stxa %g0, [%g0] ASI_ESTATE_ERROR_EN
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15) membar #Sync
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 16)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 17) mov UDBE_UE, %g1 ! %g1 = UDB error bit we test/clear below (UE path)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 18) ldxa [%g0] ASI_AFSR, %g4 ! Get AFSR
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 19)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 20) /* __spitfire_cee_trap branches here with AFSR in %g4 and
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 21) * UDBE_CE in %g1. It only clears ESTATE_ERR_CE in the ESTATE
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 22) * Error Enable register.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 23) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 24) * Register roles from here on: %g1 = UDB bit of interest,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 25) * %g4 = status word being built (encoded per sfafsr.h),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 26) * %g5 = AFAR passed through to C as-is.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 27) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 28) __spitfire_cee_trap_continue:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 29) ldxa [%g0] ASI_AFAR, %g5 ! Get AFAR
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 30)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 31) rdpr %tt, %g3
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 32) and %g3, 0x1ff, %g3 ! Paranoia, trap type is 9 bits
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 33) sllx %g3, SFSTAT_TRAP_TYPE_SHIFT, %g3
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 34) or %g4, %g3, %g4 ! fold trap type into the status word
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 35) rdpr %tl, %g3
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 36) cmp %g3, 1
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 37) mov 1, %g3
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 38) bleu %xcc, 1f ! TL <= 1: skip recording TL_GT_ONE
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 39) sllx %g3, SFSTAT_TL_GT_ONE_SHIFT, %g3 ! delay slot: runs either way, harmless if branch taken
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 40)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 41) or %g4, %g3, %g4 ! TL > 1: note it in the status word
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 42)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 43) /* Read in the UDB error register state, clearing the sticky
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 44) * error bits as-needed. We only clear them if the UE bit is
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 45) * set. Likewise, __spitfire_cee_trap below will only do so
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 46) * if the CE bit is set.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 47) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 48) * NOTE: UltraSparc-I/II have high and low UDB error
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 49) * registers, corresponding to the two UDB units
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 50) * present on those chips. UltraSparc-IIi only
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 51) * has a single UDB, called "SDB" in the manual.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 52) * For IIi the upper UDB register always reads
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 53) * as zero so for our purposes things will just
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 54) * work with the checks below.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 55) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 56) 1: ldxa [%g0] ASI_UDBH_ERROR_R, %g3 ! high UDB sticky error bits
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 57) and %g3, 0x3ff, %g7 ! Paranoia
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 58) sllx %g7, SFSTAT_UDBH_SHIFT, %g7
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 59) or %g4, %g7, %g4 ! record UDB-high state
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 60) andcc %g3, %g1, %g3 ! UDBE_UE or UDBE_CE
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 61) be,pn %xcc, 1f ! bit of interest not set, do not write back
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 62) nop
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 63) stxa %g3, [%g0] ASI_UDB_ERROR_W ! clear the sticky UDB-high bit
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 64) membar #Sync
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 65)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 66) 1: mov 0x18, %g3 ! register offset selecting the low UDB
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 67) ldxa [%g3] ASI_UDBL_ERROR_R, %g3
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 68) and %g3, 0x3ff, %g7 ! Paranoia
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 69) sllx %g7, SFSTAT_UDBL_SHIFT, %g7
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 70) or %g4, %g7, %g4 ! record UDB-low state
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 71) andcc %g3, %g1, %g3 ! UDBE_UE or UDBE_CE
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 72) be,pn %xcc, 1f ! bit of interest not set, do not write back
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 73) nop
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 74) mov 0x18, %g7 ! low-UDB register offset again
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 75) stxa %g3, [%g7] ASI_UDB_ERROR_W ! clear the sticky UDB-low bit
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 76) membar #Sync
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 77)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 78) 1: /* Ok, now that we've latched the error state, clear the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 79) * sticky bits in the AFSR by writing the collected bits back.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 80) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 81) stxa %g4, [%g0] ASI_AFSR
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 82) membar #Sync
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 83)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 84) rdpr %tl, %g2
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 85) cmp %g2, 1
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 86) rdpr %pil, %g2 ! %g2 = old PIL, trap entry code wants it
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 87) bleu,pt %xcc, 1f ! TL <= 1: take the etrap_irq path
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 88) wrpr %g0, PIL_NORMAL_MAX, %pil ! delay slot: raise PIL on both paths
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 89)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 90) ba,pt %xcc, etraptl1 ! TL > 1 trap entry
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 91) rd %pc, %g7 ! delay slot: %g7 = return point for etraptl1
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 92)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 93) ba,a,pt %xcc, 2f ! rejoin common path after trap entry
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 94) nop
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 95)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 96) 1: ba,pt %xcc, etrap_irq ! TL == 1 trap entry
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 97) rd %pc, %g7 ! delay slot: %g7 = return point for etrap_irq
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 98)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 99) 2:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 100) #ifdef CONFIG_TRACE_IRQFLAGS
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 101) call trace_hardirqs_off
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 102) nop
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 103) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 104) mov %l4, %o1 ! arg1: presumably status word saved across trap entry — verify against etrap
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 105) mov %l5, %o2 ! arg2: presumably AFAR saved across trap entry — verify against etrap
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 106) call spitfire_access_error ! hand off to C for logging
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 107) add %sp, PTREGS_OFF, %o0 ! delay slot: arg0 = pt_regs
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 108) ba,a,pt %xcc, rtrap ! annulled delay slot; does not return
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 109) .size __spitfire_access_error,.-__spitfire_access_error
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 106)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 107) /* This is the trap handler entry point for ECC correctable
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 108) * errors. They are corrected, but we listen for the trap so
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 109) * that the event can be logged.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 110) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 111) * Disrupting errors are either:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 112) * 1) single-bit ECC errors during UDB reads to system
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 113) * memory
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 114) * 2) data parity errors during write-back events
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 115) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 116) * As far as I can make out from the manual, the CEE trap is
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 117) * only for correctable errors during memory read accesses by
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 118) * the front-end of the processor.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 119) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 120) * The code below is only for trap level 1 CEE events, as it
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 121) * is the only situation where we can safely record and log.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 122) * For trap level >1 we just clear the CE bit in the AFSR and
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 123) * return.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 124) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 125)  * This is just like __spitfire_access_error above, but it
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 126) * specifically handles correctable errors. If an
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 127) * uncorrectable error is indicated in the AFSR we will branch
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 128) * directly above to __spitfire_access_error to handle it
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 129) * instead. Uncorrectable therefore takes priority over
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 130) * correctable, and the error logging C code will notice this
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 131) * case by inspecting the trap type.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 132) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 133) .type __spitfire_cee_trap,#function
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 134) __spitfire_cee_trap:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 135) ldxa [%g0] ASI_AFSR, %g4 ! Get AFSR
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 136) mov 1, %g3
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 137) sllx %g3, SFAFSR_UE_SHIFT, %g3 ! %g3 = UE bit mask
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 138) andcc %g4, %g3, %g0 ! Check for UE
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 139) bne,pn %xcc, __spitfire_access_error ! uncorrectable takes priority
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 140) nop
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 141)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 142) /* Ok, in this case we only have a correctable error.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 143) * Indicate we only wish to capture that state in register
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 144) * %g1, and we only disable CE error reporting unlike UE
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 145) * handling which disables all errors.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 146) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 147) ldxa [%g0] ASI_ESTATE_ERROR_EN, %g3
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 148) andn %g3, ESTATE_ERR_CE, %g3 ! mask off only CE reporting
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 149) stxa %g3, [%g0] ASI_ESTATE_ERROR_EN
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 150) membar #Sync
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 151)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 152) /* Preserve AFSR in %g4, indicate UDB state to capture in %g1 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 153) ba,pt %xcc, __spitfire_cee_trap_continue
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 154) mov UDBE_CE, %g1 ! delay slot: select CE as the UDB bit of interest
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 155) .size __spitfire_cee_trap,.-__spitfire_cee_trap
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 156)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 157) .type __spitfire_data_access_exception_tl1,#function
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 158) __spitfire_data_access_exception_tl1:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 159) /* TL>1 data access exception: capture DMMU SFSR/SFAR, special-case
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 160) * faults taken inside window spill/fill handlers, then enter C.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 161) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 162) rdpr %pstate, %g4
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 163) wrpr %g4, PSTATE_MG|PSTATE_AG, %pstate ! wrpr XORs operands: flip MG/AG global-set bits
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 164) mov TLB_SFSR, %g3
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 165) mov DMMU_SFAR, %g5
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 166) ldxa [%g3] ASI_DMMU, %g4 ! Get SFSR
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 167) ldxa [%g5] ASI_DMMU, %g5 ! Get SFAR
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 168) stxa %g0, [%g3] ASI_DMMU ! Clear SFSR.FaultValid bit
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 169) membar #Sync
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 170) rdpr %tt, %g3
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 171) cmp %g3, 0x80 ! first win spill/fill trap
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 172) blu,pn %xcc, 1f
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 173) cmp %g3, 0xff ! last win spill/fill trap
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 174) bgu,pn %xcc, 1f
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 175) nop
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 176) ba,pt %xcc, winfix_dax ! fault inside a spill/fill handler
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 177) rdpr %tpc, %g3 ! delay slot: presumably winfix_dax wants trap PC in %g3 — confirm
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 178) 1: sethi %hi(109f), %g7 ! start building return address for etraptl1
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 179) ba,pt %xcc, etraptl1
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 180) 109: or %g7, %lo(109b), %g7 ! delay slot: %g7 = address of label 109
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 181) mov %l4, %o1 ! arg1: presumably SFSR saved across trap entry — verify against etrap
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 182) mov %l5, %o2 ! arg2: presumably SFAR saved across trap entry — verify against etrap
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 183) call spitfire_data_access_exception_tl1
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 184) add %sp, PTREGS_OFF, %o0 ! delay slot: arg0 = pt_regs
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 185) ba,a,pt %xcc, rtrap ! annulled delay slot; does not return
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 186) .size __spitfire_data_access_exception_tl1,.-__spitfire_data_access_exception_tl1
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 184)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 185) .type __spitfire_data_access_exception,#function
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 186) __spitfire_data_access_exception:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 187) /* TL=1 data access exception: capture DMMU SFSR/SFAR and enter C.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 188) * Same as the _tl1 variant above minus the spill/fill special case.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 189) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 190) rdpr %pstate, %g4
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 191) wrpr %g4, PSTATE_MG|PSTATE_AG, %pstate ! wrpr XORs operands: flip MG/AG global-set bits
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 192) mov TLB_SFSR, %g3
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 193) mov DMMU_SFAR, %g5
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 194) ldxa [%g3] ASI_DMMU, %g4 ! Get SFSR
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 195) ldxa [%g5] ASI_DMMU, %g5 ! Get SFAR
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 196) stxa %g0, [%g3] ASI_DMMU ! Clear SFSR.FaultValid bit
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 197) membar #Sync
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 198) sethi %hi(109f), %g7 ! start building return address for etrap
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 199) ba,pt %xcc, etrap
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 200) 109: or %g7, %lo(109b), %g7 ! delay slot: %g7 = address of label 109
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 201) mov %l4, %o1 ! arg1: presumably SFSR saved across trap entry — verify against etrap
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 202) mov %l5, %o2 ! arg2: presumably SFAR saved across trap entry — verify against etrap
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 203) call spitfire_data_access_exception
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 204) add %sp, PTREGS_OFF, %o0 ! delay slot: arg0 = pt_regs
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 205) ba,a,pt %xcc, rtrap ! annulled delay slot; does not return
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 206) .size __spitfire_data_access_exception,.-__spitfire_data_access_exception
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 204)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 205) .type __spitfire_insn_access_exception_tl1,#function
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 206) __spitfire_insn_access_exception_tl1:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 207) /* TL>1 instruction access exception: capture IMMU SFSR, use the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 208) * trap PC in place of a fault address, then enter C.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 209) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 210) rdpr %pstate, %g4
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 211) wrpr %g4, PSTATE_MG|PSTATE_AG, %pstate ! wrpr XORs operands: flip MG/AG global-set bits
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 212) mov TLB_SFSR, %g3
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 213) ldxa [%g3] ASI_IMMU, %g4 ! Get SFSR
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 214) rdpr %tpc, %g5 ! IMMU has no SFAR, use TPC
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 215) stxa %g0, [%g3] ASI_IMMU ! Clear FaultValid bit
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 216) membar #Sync
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 217) sethi %hi(109f), %g7 ! start building return address for etraptl1
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 218) ba,pt %xcc, etraptl1
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 219) 109: or %g7, %lo(109b), %g7 ! delay slot: %g7 = address of label 109
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 220) mov %l4, %o1 ! arg1: presumably SFSR saved across trap entry — verify against etrap
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 221) mov %l5, %o2 ! arg2: presumably TPC saved across trap entry — verify against etrap
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 222) call spitfire_insn_access_exception_tl1
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 223) add %sp, PTREGS_OFF, %o0 ! delay slot: arg0 = pt_regs
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 224) ba,a,pt %xcc, rtrap ! annulled delay slot; does not return
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 225) .size __spitfire_insn_access_exception_tl1,.-__spitfire_insn_access_exception_tl1
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 223)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 224) .type __spitfire_insn_access_exception,#function
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 225) __spitfire_insn_access_exception:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 226) /* TL=1 instruction access exception: capture IMMU SFSR, use the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 227) * trap PC in place of a fault address, then enter C.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 228) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 229) rdpr %pstate, %g4
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 230) wrpr %g4, PSTATE_MG|PSTATE_AG, %pstate ! wrpr XORs operands: flip MG/AG global-set bits
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 231) mov TLB_SFSR, %g3
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 232) ldxa [%g3] ASI_IMMU, %g4 ! Get SFSR
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 233) rdpr %tpc, %g5 ! IMMU has no SFAR, use TPC
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 234) stxa %g0, [%g3] ASI_IMMU ! Clear FaultValid bit
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 235) membar #Sync
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 236) sethi %hi(109f), %g7 ! start building return address for etrap
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 237) ba,pt %xcc, etrap
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 238) 109: or %g7, %lo(109b), %g7 ! delay slot: %g7 = address of label 109
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 239) mov %l4, %o1 ! arg1: presumably SFSR saved across trap entry — verify against etrap
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 240) mov %l5, %o2 ! arg2: presumably TPC saved across trap entry — verify against etrap
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 241) call spitfire_insn_access_exception
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 242) add %sp, PTREGS_OFF, %o0 ! delay slot: arg0 = pt_regs
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 243) ba,a,pt %xcc, rtrap ! annulled delay slot; does not return
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 244) .size __spitfire_insn_access_exception,.-__spitfire_insn_access_exception