/* SPDX-License-Identifier: GPL-2.0 */
/* arch/sparc64/kernel/ktlb.S: Kernel mapping TLB miss handling.
 *
 * Copyright (C) 1995, 1997, 2005, 2008 David S. Miller <davem@davemloft.net>
 * Copyright (C) 1996 Eddie C. Dost (ecd@brainaid.de)
 * Copyright (C) 1996 Miguel de Icaza (miguel@nuclecu.unam.mx)
 * Copyright (C) 1996,98,99 Jakub Jelinek (jj@sunsite.mff.cuni.cz)
 */

#include <linux/pgtable.h>
#include <asm/head.h>
#include <asm/asi.h>
#include <asm/page.h>
#include <asm/tsb.h>

	.text
	.align	32

kvmap_itlb:
	/* g6: TAG TARGET */
	mov		TLB_TAG_ACCESS, %g4
	ldxa		[%g4] ASI_IMMU, %g4

	/* The kernel executes in context zero, therefore we do not
	 * need to clear the context ID bits out of %g4 here.
	 */

	/* sun4v_itlb_miss branches here with the missing virtual
	 * address already loaded into %g4
	 */
kvmap_itlb_4v:

	/* Catch kernel NULL pointer calls. */
	sethi		%hi(PAGE_SIZE), %g5
	cmp		%g4, %g5
	blu,pn		%xcc, kvmap_itlb_longpath
	 nop

	KERN_TSB_LOOKUP_TL1(%g4, %g6, %g5, %g1, %g2, %g3, kvmap_itlb_load)

kvmap_itlb_tsb_miss:
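	/* The kernel TSB had no entry for this address.  Addresses below
	 * LOW_OBP_ADDRESS and at or above 4GB are resolved via the kernel
	 * page tables; the window in between is handled through the OBP
	 * translations.
	 */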
	sethi		%hi(LOW_OBP_ADDRESS), %g5
	cmp		%g4, %g5
	blu,pn		%xcc, kvmap_itlb_vmalloc_addr
	 mov		0x1, %g5
	sllx		%g5, 32, %g5
	cmp		%g4, %g5
	blu,pn		%xcc, kvmap_itlb_obp
	 nop

kvmap_itlb_vmalloc_addr:
	KERN_PGTABLE_WALK(%g4, %g5, %g2, kvmap_itlb_longpath)

	TSB_LOCK_TAG(%g1, %g2, %g7)
	TSB_WRITE(%g1, %g5, %g6)

	/* fallthrough to TLB load */

kvmap_itlb_load:

661:	stxa		%g5, [%g0] ASI_ITLB_DATA_IN
	retry
	.section	.sun4v_2insn_patch, "ax"
	.word		661b
	nop
	nop
	.previous

	/* For sun4v the ASI_ITLB_DATA_IN store and the retry
	 * instruction get nop'd out and we get here to branch
	 * to the sun4v tlb load code.  The registers are set up
	 * as follows:
	 *
	 * %g4: vaddr
	 * %g5: PTE
	 * %g6: TAG
	 *
	 * The sun4v TLB load wants the PTE in %g3 so we fix that
	 * up here.
	 */
	ba,pt		%xcc, sun4v_itlb_load
	 mov		%g5, %g3

kvmap_itlb_longpath:

661:	rdpr		%pstate, %g5
	wrpr		%g5, PSTATE_AG | PSTATE_MG, %pstate
	.section	.sun4v_2insn_patch, "ax"
	.word		661b
	SET_GL(1)
	nop
	.previous

	rdpr		%tpc, %g5
	ba,pt		%xcc, sparc64_realfault_common
	 mov		FAULT_CODE_ITLB, %g4

kvmap_itlb_obp:
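	/* Look the address up in the OBP translation table and cache the
	 * result in the kernel TSB before loading the TLB.  The DTLB
	 * variant below does the same for data accesses.
	 */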
	OBP_TRANS_LOOKUP(%g4, %g5, %g2, %g3, kvmap_itlb_longpath)

	TSB_LOCK_TAG(%g1, %g2, %g7)

	TSB_WRITE(%g1, %g5, %g6)

	ba,pt		%xcc, kvmap_itlb_load
	 nop

kvmap_dtlb_obp:
	OBP_TRANS_LOOKUP(%g4, %g5, %g2, %g3, kvmap_dtlb_longpath)

	TSB_LOCK_TAG(%g1, %g2, %g7)

	TSB_WRITE(%g1, %g5, %g6)

	ba,pt		%xcc, kvmap_dtlb_load
	 nop

kvmap_linear_early:
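	/* Early boot path for linear-mapping misses: build the PTE by
	 * XOR'ing the virtual address with the first kern_linear_pte_xor
	 * entry, then insert it via the 4MB TSB load path below.
	 */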
	sethi		%hi(kern_linear_pte_xor), %g7
	ldx		[%g7 + %lo(kern_linear_pte_xor)], %g2
	ba,pt		%xcc, kvmap_dtlb_tsb4m_load
	 xor		%g2, %g4, %g5

	.align		32
kvmap_dtlb_tsb4m_load:
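	/* %g1 still points at the kernel TSB entry computed by the lookup
	 * above; install the new (tag, PTE) pair there, then load the TLB.
	 */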
	TSB_LOCK_TAG(%g1, %g2, %g7)
	TSB_WRITE(%g1, %g5, %g6)
	ba,pt		%xcc, kvmap_dtlb_load
	 nop

kvmap_dtlb:
	/* %g6: TAG TARGET */
	mov		TLB_TAG_ACCESS, %g4
	ldxa		[%g4] ASI_DMMU, %g4

	/* The kernel executes in context zero, therefore we do not
	 * need to clear the context ID bits out of %g4 here.
	 */

	/* sun4v_dtlb_miss branches here with the missing virtual
	 * address already loaded into %g4
	 */
kvmap_dtlb_4v:
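	/* Linear-mapping addresses have bit 63 set; a non-negative value
	 * in %g4 therefore means this is not a linear-mapping address.
	 */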
	brgez,pn	%g4, kvmap_dtlb_nonlinear
	 nop

#ifdef CONFIG_DEBUG_PAGEALLOC
	/* Index through the base page size TSB even for linear
	 * mappings when using page allocation debugging.
	 */
	KERN_TSB_LOOKUP_TL1(%g4, %g6, %g5, %g1, %g2, %g3, kvmap_dtlb_load)
#else
	/* Correct TAG_TARGET is already in %g6, check 4mb TSB. */
	KERN_TSB4M_LOOKUP_TL1(%g6, %g5, %g1, %g2, %g3, kvmap_dtlb_load)
#endif
	/* Linear mapping TSB lookup failed.  Fallthrough to kernel
	 * page table based lookup.
	 */
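	/* Boot-time patch site: boot code patches this branch away once the
	 * kernel page tables cover the linear mapping, so later misses fall
	 * through to the page table walk below.
	 */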
	.globl		kvmap_linear_patch
kvmap_linear_patch:
	ba,a,pt		%xcc, kvmap_linear_early

kvmap_dtlb_vmalloc_addr:
	KERN_PGTABLE_WALK(%g4, %g5, %g2, kvmap_dtlb_longpath)

	TSB_LOCK_TAG(%g1, %g2, %g7)
	TSB_WRITE(%g1, %g5, %g6)

	/* fallthrough to TLB load */

kvmap_dtlb_load:

661:	stxa		%g5, [%g0] ASI_DTLB_DATA_IN	! Reload TLB
	retry
	.section	.sun4v_2insn_patch, "ax"
	.word		661b
	nop
	nop
	.previous

	/* For sun4v the ASI_DTLB_DATA_IN store and the retry
	 * instruction get nop'd out and we get here to branch
	 * to the sun4v tlb load code.  The registers are set up
	 * as follows:
	 *
	 * %g4: vaddr
	 * %g5: PTE
	 * %g6: TAG
	 *
	 * The sun4v TLB load wants the PTE in %g3 so we fix that
	 * up here.
	 */
	ba,pt		%xcc, sun4v_dtlb_load
	 mov		%g5, %g3

#ifdef CONFIG_SPARSEMEM_VMEMMAP
kvmap_vmemmap:
	KERN_PGTABLE_WALK(%g4, %g5, %g2, kvmap_dtlb_longpath)
	ba,a,pt		%xcc, kvmap_dtlb_load
#endif

kvmap_dtlb_nonlinear:
	/* Catch kernel NULL pointer derefs. */
	sethi		%hi(PAGE_SIZE), %g5
	cmp		%g4, %g5
	bleu,pn		%xcc, kvmap_dtlb_longpath
	 nop

#ifdef CONFIG_SPARSEMEM_VMEMMAP
	/* Do not use the TSB for vmemmap. */
	sethi		%hi(VMEMMAP_BASE), %g5
	ldx		[%g5 + %lo(VMEMMAP_BASE)], %g5
	cmp		%g4, %g5
	bgeu,pn		%xcc, kvmap_vmemmap
	 nop
#endif

	KERN_TSB_LOOKUP_TL1(%g4, %g6, %g5, %g1, %g2, %g3, kvmap_dtlb_load)

kvmap_dtlb_tsbmiss:
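	/* Only module and vmalloc addresses can still be resolved here:
	 * anything outside [MODULES_VADDR, VMALLOC_END) is a real fault.
	 */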
	sethi		%hi(MODULES_VADDR), %g5
	cmp		%g4, %g5
	blu,pn		%xcc, kvmap_dtlb_longpath
	 sethi		%hi(VMALLOC_END), %g5
	ldx		[%g5 + %lo(VMALLOC_END)], %g5
	cmp		%g4, %g5
	bgeu,pn		%xcc, kvmap_dtlb_longpath
	 nop

kvmap_check_obp:
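	/* Same classification as the ITLB miss path: below LOW_OBP_ADDRESS
	 * or at/above 4GB means a kernel page table walk, the window in
	 * between is handled via the OBP translations.
	 */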
	sethi		%hi(LOW_OBP_ADDRESS), %g5
	cmp		%g4, %g5
	blu,pn		%xcc, kvmap_dtlb_vmalloc_addr
	 mov		0x1, %g5
	sllx		%g5, 32, %g5
	cmp		%g4, %g5
	blu,pn		%xcc, kvmap_dtlb_obp
	 nop
	ba,pt		%xcc, kvmap_dtlb_vmalloc_addr
	 nop

kvmap_dtlb_longpath:

661:	rdpr		%pstate, %g5
	wrpr		%g5, PSTATE_AG | PSTATE_MG, %pstate
	.section	.sun4v_2insn_patch, "ax"
	.word		661b
	SET_GL(1)
	ldxa		[%g0] ASI_SCRATCHPAD, %g5
	.previous

	rdpr		%tl, %g3
	cmp		%g3, 1

661:	mov		TLB_TAG_ACCESS, %g4
	ldxa		[%g4] ASI_DMMU, %g5
	.section	.sun4v_2insn_patch, "ax"
	.word		661b
	ldx		[%g5 + HV_FAULT_D_ADDR_OFFSET], %g5
	nop
	.previous

	/* The kernel executes in context zero, therefore we do not
	 * need to clear the context ID bits out of %g5 here.
	 */

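	/* If the miss was taken at TL1 (i.e. from normal execution) go
	 * straight to the common fault handler; misses from a higher trap
	 * level must be routed through the window fixup trampoline.
	 */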
	be,pt		%xcc, sparc64_realfault_common
	 mov		FAULT_CODE_DTLB, %g4
	ba,pt		%xcc, winfix_trampoline
	 nop