/* SPDX-License-Identifier: GPL-2.0 */
/*
 * arch/ia64/kernel/ivt.S
 *
 * Copyright (C) 1998-2001, 2003, 2005 Hewlett-Packard Co
 *	Stephane Eranian <eranian@hpl.hp.com>
 *	David Mosberger <davidm@hpl.hp.com>
 * Copyright (C) 2000, 2002-2003 Intel Co
 *	Asit Mallick <asit.k.mallick@intel.com>
 *	Suresh Siddha <suresh.b.siddha@intel.com>
 *	Kenneth Chen <kenneth.w.chen@intel.com>
 *	Fenghua Yu <fenghua.yu@intel.com>
 *
 * 00/08/23	Asit Mallick <asit.k.mallick@intel.com>	TLB handling for SMP
 * 00/12/20	David Mosberger-Tang <davidm@hpl.hp.com>	DTLB/ITLB handler now uses virtual PT.
 *
 * Copyright (C) 2005 Hewlett-Packard Co
 *	Dan Magenheimer <dan.magenheimer@hp.com>
 *	Xen paravirtualization
 * Copyright (c) 2008 Isaku Yamahata <yamahata at valinux co jp>
 *	VA Linux Systems Japan K.K.
 *	pv_ops.
 *	Yaozu (Eddie) Dong <eddie.dong@intel.com>
 */
/*
 * This file defines the interruption vector table used by the CPU.
 * It does not include one entry per possible cause of interruption.
 *
 * The first 20 entries of the table contain 64 bundles each while the
 * remaining 48 entries contain only 16 bundles each.
 *
 * The 64 bundles are used to allow inlining the whole handler for critical
 * interruptions like TLB misses.
 *
 * For each entry, the comment is as follows:
 *
 *		// 0x1c00 Entry 7 (size 64 bundles) Data Key Miss (12,51)
 *	entry offset ----/     /         /              /       /
 *	entry number ---------/         /              /       /
 *	size of the entry -------------/              /       /
 *	vector name -------------------------------/         /
 *	interruptions triggering this vector ----------------/
 *
 * The table is 32KB in size and must be aligned on 32KB boundary.
 * (The CPU ignores the 15 lower bits of the address)
 *
 * Table is based upon EAS2.6 (Oct 1999)
 */
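/*
 * Size check: an IA-64 bundle is 16 bytes, so the layout above gives
 * 20 entries * 64 bundles * 16 bytes + 48 entries * 16 bundles * 16 bytes
 * = 20480 + 12288 = 32768 bytes, i.e. exactly the 32KB the table must occupy.
 */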


#include <linux/pgtable.h>
#include <asm/asmmacro.h>
#include <asm/break.h>
#include <asm/kregs.h>
#include <asm/asm-offsets.h>
#include <asm/processor.h>
#include <asm/ptrace.h>
#include <asm/thread_info.h>
#include <asm/unistd.h>
#include <asm/errno.h>
#include <asm/export.h>

#if 0
# define PSR_DEFAULT_BITS	psr.ac
#else
# define PSR_DEFAULT_BITS	0
#endif

#if 0
/*
 * This lets you track the last eight faults that occurred on the CPU.  Make sure ar.k2 isn't
 * needed for something else before enabling this...
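 * When enabled, ar.k2 ends up holding the vector numbers of the last eight
 * faults, one byte each, with the most recent fault in bits 7:0.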
 */
# define DBG_FAULT(i)	mov r16=ar.k2;;	shl r16=r16,8;;	add r16=(i),r16;;mov ar.k2=r16
#else
# define DBG_FAULT(i)
#endif

#include "minstate.h"

#define FAULT(n)								\
	mov r31=pr;								\
	mov r19=n;;		/* prepare to save predicates */		\
	br.sptk.many dispatch_to_fault_handler

	.section .text..ivt,"ax"

	.align 32768	// align on 32KB boundary
	.global ia64_ivt
	EXPORT_DATA_SYMBOL(ia64_ivt)
ia64_ivt:
/////////////////////////////////////////////////////////////////////////////////////////
// 0x0000 Entry 0 (size 64 bundles) VHPT Translation (8,20,47)
ENTRY(vhpt_miss)
	DBG_FAULT(0)
	/*
	 * The VHPT vector is invoked when the TLB entry for the virtual page table
	 * is missing.  This happens only as a result of a previous
	 * (the "original") TLB miss, which may either be caused by an instruction
	 * fetch or a data access (or non-access).
	 *
	 * What we do here is normal TLB miss handling for the _original_ miss,
	 * followed by inserting the TLB entry for the virtual page table page
	 * that the VHPT walker was attempting to access.  The latter gets
	 * inserted as long as the page table entries above the pte level have valid
	 * mappings for the faulting address.  The TLB entry for the original
	 * miss gets inserted only if the pte entry indicates that the page is
	 * present.
	 *
	 * do_page_fault gets invoked in the following cases:
	 *	- the faulting virtual address uses unimplemented address bits
	 *	- the faulting virtual address has no valid page table mapping
	 */
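	/*
	 * Rough C sketch of the walk performed below.  This is illustrative only,
	 * not the kernel's C code: the function and IDX() macro names are made up,
	 * the example constants are for the default 16KB-page, 3-level
	 * configuration, the hugetlb and region-5 root selection are ignored, and
	 * the table entries are treated as directly dereferenceable pointers
	 * (the real walk runs with psr.dt off, i.e. physical addressing).
	 *
	 *	#include <stdint.h>
	 *	#include <stddef.h>
	 *
	 *	// Illustrative values for a 16KB-page, 3-level configuration.
	 *	#define PAGE_SHIFT	14
	 *	#define PMD_SHIFT	(PAGE_SHIFT + (PAGE_SHIFT - 3))		// 25
	 *	#define PGDIR_SHIFT	(PMD_SHIFT + (PAGE_SHIFT - 3))		// 36
	 *	#define IDX(addr, shift) (((addr) >> (shift)) & ((1UL << (PAGE_SHIFT - 3)) - 1))
	 *
	 *	static uint64_t *walk(uint64_t *pgd_base, uint64_t addr)
	 *	{
	 *		uint64_t *pgd = pgd_base + IDX(addr, PGDIR_SHIFT);
	 *		if (!*pgd)
	 *			return NULL;				// -> page_fault
	 *		uint64_t *pmd = (uint64_t *)(*pgd & ~((1UL << PAGE_SHIFT) - 1))
	 *				+ IDX(addr, PMD_SHIFT);
	 *		if (!*pmd)
	 *			return NULL;				// -> page_fault
	 *		return (uint64_t *)(*pmd & ~((1UL << PAGE_SHIFT) - 1))
	 *				+ IDX(addr, PAGE_SHIFT);	// inserted only if _PAGE_P is set
	 *	}
	 *
	 * In addition, the handler below inserts a DTLB entry for the virtual page
	 * table page that contains the pte (cr.iha), so the hardware walker can
	 * find it directly the next time.
	 */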
	MOV_FROM_IFA(r16)			// get address that caused the TLB miss
#ifdef CONFIG_HUGETLB_PAGE
	movl r18=PAGE_SHIFT
	MOV_FROM_ITIR(r25)
#endif
	;;
	RSM_PSR_DT				// use physical addressing for data
	mov r31=pr				// save the predicate registers
	mov r19=IA64_KR(PT_BASE)		// get page table base address
	shl r21=r16,3				// shift bit 60 into sign bit
	shr.u r17=r16,61			// get the region number into r17
	;;
	shr.u r22=r21,3
#ifdef CONFIG_HUGETLB_PAGE
	extr.u r26=r25,2,6
	;;
	cmp.ne p8,p0=r18,r26
	sub r27=r26,r18
	;;
(p8)	dep r25=r18,r25,2,6
(p8)	shr r22=r22,r27
#endif
	;;
	cmp.eq p6,p7=5,r17			// is IFA pointing into region 5?
	shr.u r18=r22,PGDIR_SHIFT		// get bottom portion of pgd index bits
	;;
(p7)	dep r17=r17,r19,(PAGE_SHIFT-3),3	// put region number bits in place

	srlz.d
	LOAD_PHYSICAL(p6, r19, swapper_pg_dir)	// region 5 is rooted at swapper_pg_dir

	.pred.rel "mutex", p6, p7
(p6)	shr.u r21=r21,PGDIR_SHIFT+PAGE_SHIFT
(p7)	shr.u r21=r21,PGDIR_SHIFT+PAGE_SHIFT-3
	;;
(p6)	dep r17=r18,r19,3,(PAGE_SHIFT-3)	// r17=pgd_offset for region 5
(p7)	dep r17=r18,r17,3,(PAGE_SHIFT-6)	// r17=pgd_offset for region[0-4]
	cmp.eq p7,p6=0,r21			// unused address bits all zeroes?
#if CONFIG_PGTABLE_LEVELS == 4
	shr.u r28=r22,PUD_SHIFT			// shift pud index into position
#else
	shr.u r18=r22,PMD_SHIFT			// shift pmd index into position
#endif
	;;
	ld8 r17=[r17]				// get *pgd (may be 0)
	;;
(p7)	cmp.eq p6,p7=r17,r0			// was pgd_present(*pgd) == NULL?
#if CONFIG_PGTABLE_LEVELS == 4
	dep r28=r28,r17,3,(PAGE_SHIFT-3)	// r28=pud_offset(pgd,addr)
	;;
	shr.u r18=r22,PMD_SHIFT			// shift pmd index into position
(p7)	ld8 r29=[r28]				// get *pud (may be 0)
	;;
(p7)	cmp.eq.or.andcm p6,p7=r29,r0		// was pud_present(*pud) == NULL?
	dep r17=r18,r29,3,(PAGE_SHIFT-3)	// r17=pmd_offset(pud,addr)
#else
	dep r17=r18,r17,3,(PAGE_SHIFT-3)	// r17=pmd_offset(pgd,addr)
#endif
	;;
(p7)	ld8 r20=[r17]				// get *pmd (may be 0)
	shr.u r19=r22,PAGE_SHIFT		// shift pte index into position
	;;
(p7)	cmp.eq.or.andcm p6,p7=r20,r0		// was pmd_present(*pmd) == NULL?
	dep r21=r19,r20,3,(PAGE_SHIFT-3)	// r21=pte_offset(pmd,addr)
	;;
(p7)	ld8 r18=[r21]				// read *pte
	MOV_FROM_ISR(r19)			// cr.isr bit 32 tells us if this is an insn miss
	;;
(p7)	tbit.z p6,p7=r18,_PAGE_P_BIT		// page present bit cleared?
	MOV_FROM_IHA(r22)			// get the VHPT address that caused the TLB miss
	;;					// avoid RAW on p7
(p7)	tbit.nz.unc p10,p11=r19,32		// is it an instruction TLB miss?
	dep r23=0,r20,0,PAGE_SHIFT		// clear low bits to get page address
	;;
	ITC_I_AND_D(p10, p11, r18, r24)		// insert the instruction TLB entry and
						// insert the data TLB entry
(p6)	br.cond.spnt.many page_fault		// handle bad address/page not present (page fault)
	MOV_TO_IFA(r22, r24)

#ifdef CONFIG_HUGETLB_PAGE
	MOV_TO_ITIR(p8, r25, r24)		// change to default page-size for VHPT
#endif

	/*
	 * Now compute and insert the TLB entry for the virtual page table.  We never
	 * execute in a page table page so there is no need to set the exception deferral
	 * bit.
	 */
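	/*
	 * r23 holds the page-frame address of the pte page (*pmd with its low
	 * PAGE_SHIFT bits cleared above); adding the attribute bits below yields
	 * r24, a ready-made PTE for the VHPT page: dirty/accessed, privilege
	 * level 0, read-write.
	 */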
	adds r24=__DIRTY_BITS_NO_ED|_PAGE_PL_0|_PAGE_AR_RW,r23
	;;
	ITC_D(p7, r24, r25)
	;;
#ifdef CONFIG_SMP
	/*
	 * Tell the assembler's dependency-violation checker that the above "itc" instructions
	 * cannot possibly affect the following loads:
	 */
	dv_serialize_data

	/*
	 * Re-check the pagetable entries.  If they changed, we may have received a ptc.g
	 * between reading the pagetable and the "itc".  If so, flush the entry we
	 * inserted and retry.  At this point, we have:
	 *
	 *	r28 = equivalent of pud_offset(pgd, ifa)
	 *	r17 = equivalent of pmd_offset(pud, ifa)
	 *	r21 = equivalent of pte_offset(pmd, ifa)
	 *
	 *	r29 = *pud
	 *	r20 = *pmd
	 *	r18 = *pte
	 */
	ld8 r25=[r21]				// read *pte again
	ld8 r26=[r17]				// read *pmd again
#if CONFIG_PGTABLE_LEVELS == 4
	ld8 r19=[r28]				// read *pud again
#endif
	cmp.ne p6,p7=r0,r0
	;;
	cmp.ne.or.andcm p6,p7=r26,r20		// did *pmd change
#if CONFIG_PGTABLE_LEVELS == 4
	cmp.ne.or.andcm p6,p7=r19,r29		// did *pud change
#endif
	mov r27=PAGE_SHIFT<<2
	;;
(p6)	ptc.l r22,r27				// purge PTE page translation
(p7)	cmp.ne.or.andcm p6,p7=r25,r18		// did *pte change
	;;
(p6)	ptc.l r16,r27				// purge translation
#endif

	mov pr=r31,-1				// restore predicate registers
	RFI
END(vhpt_miss)

	.org ia64_ivt+0x400
/////////////////////////////////////////////////////////////////////////////////////////
// 0x0400 Entry 1 (size 64 bundles) ITLB (21)
ENTRY(itlb_miss)
	DBG_FAULT(1)
	/*
	 * The ITLB handler accesses the PTE via the virtually mapped linear
	 * page table.  If a nested TLB miss occurs, we switch into physical
	 * mode, walk the page table, and then re-execute the PTE read and
	 * go on normally after that.
	 */
	MOV_FROM_IFA(r16)			// get virtual address
	mov r29=b0				// save b0
	mov r31=pr				// save predicates
.itlb_fault:
	MOV_FROM_IHA(r17)			// get virtual address of PTE
	movl r30=1f				// load nested fault continuation point
	;;
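	/*
	 * If the load below misses in the DTLB, the nested_dtlb_miss handler
	 * (entry 5) walks the page table in physical mode, leaves the PTE's
	 * physical address in r17, and branches back to label 1 via r30.
	 */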
1:	ld8 r18=[r17]				// read *pte
	;;
	mov b0=r29
	tbit.z p6,p0=r18,_PAGE_P_BIT		// page present bit cleared?
(p6)	br.cond.spnt page_fault
	;;
	ITC_I(p0, r18, r19)
	;;
#ifdef CONFIG_SMP
	/*
	 * Tell the assembler's dependency-violation checker that the above "itc" instructions
	 * cannot possibly affect the following loads:
	 */
	dv_serialize_data

	ld8 r19=[r17]				// read *pte again and see if same
	mov r20=PAGE_SHIFT<<2			// setup page size for purge
	;;
	cmp.ne p7,p0=r18,r19
	;;
(p7)	ptc.l r16,r20
#endif
	mov pr=r31,-1
	RFI
END(itlb_miss)

	.org ia64_ivt+0x0800
/////////////////////////////////////////////////////////////////////////////////////////
// 0x0800 Entry 2 (size 64 bundles) DTLB (9,48)
ENTRY(dtlb_miss)
	DBG_FAULT(2)
	/*
	 * The DTLB handler accesses the PTE via the virtually mapped linear
	 * page table.  If a nested TLB miss occurs, we switch into physical
	 * mode, walk the page table, and then re-execute the PTE read and
	 * go on normally after that.
	 */
	MOV_FROM_IFA(r16)			// get virtual address
	mov r29=b0				// save b0
	mov r31=pr				// save predicates
dtlb_fault:
	MOV_FROM_IHA(r17)			// get virtual address of PTE
	movl r30=1f				// load nested fault continuation point
	;;
1:	ld8 r18=[r17]				// read *pte
	;;
	mov b0=r29
	tbit.z p6,p0=r18,_PAGE_P_BIT		// page present bit cleared?
(p6)	br.cond.spnt page_fault
	;;
	ITC_D(p0, r18, r19)
	;;
#ifdef CONFIG_SMP
	/*
	 * Tell the assembler's dependency-violation checker that the above "itc" instructions
	 * cannot possibly affect the following loads:
	 */
	dv_serialize_data

	ld8 r19=[r17]				// read *pte again and see if same
	mov r20=PAGE_SHIFT<<2			// setup page size for purge
	;;
	cmp.ne p7,p0=r18,r19
	;;
(p7)	ptc.l r16,r20
#endif
	mov pr=r31,-1
	RFI
END(dtlb_miss)

	.org ia64_ivt+0x0c00
/////////////////////////////////////////////////////////////////////////////////////////
// 0x0c00 Entry 3 (size 64 bundles) Alt ITLB (19)
ENTRY(alt_itlb_miss)
	DBG_FAULT(3)
	MOV_FROM_IFA(r16)			// get address that caused the TLB miss
	movl r17=PAGE_KERNEL
	MOV_FROM_IPSR(p0, r21)
	movl r19=(((1 << IA64_MAX_PHYS_BITS) - 1) & ~0xfff)
	mov r31=pr
	;;
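	/*
	 * r19 is a mask for the ppn field of an identity mapping: ANDed with the
	 * faulting address below, it keeps the implemented physical-address bits
	 * (bit 12 and up) and clears the region bits as well as the low 12 bits
	 * where the PTE control bits live, so OR-ing in PAGE_KERNEL (plus the
	 * uncached bit for region 6) yields a PTE that maps the address onto itself.
	 */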
#ifdef CONFIG_DISABLE_VHPT
	shr.u r22=r16,61			// get the region number into r22
	;;
	cmp.gt p8,p0=6,r22			// user mode
	;;
	THASH(p8, r17, r16, r23)
	;;
	MOV_TO_IHA(p8, r17, r23)
(p8)	mov r29=b0				// save b0
(p8)	br.cond.dptk .itlb_fault
#endif
	extr.u r23=r21,IA64_PSR_CPL0_BIT,2	// extract psr.cpl
	and r19=r19,r16				// clear ed, reserved bits, and PTE control bits
	shr.u r18=r16,57			// move address bit 61 to bit 4
	;;
	andcm r18=0x10,r18			// bit 4=~address-bit(61)
	cmp.ne p8,p0=r0,r23			// psr.cpl != 0?
	or r19=r17,r19				// insert PTE control bits into r19
	;;
	or r19=r19,r18				// set bit 4 (uncached) if the access was to region 6
(p8)	br.cond.spnt page_fault
	;;
	ITC_I(p0, r19, r18)			// insert the TLB entry
	mov pr=r31,-1
	RFI
END(alt_itlb_miss)

	.org ia64_ivt+0x1000
/////////////////////////////////////////////////////////////////////////////////////////
// 0x1000 Entry 4 (size 64 bundles) Alt DTLB (7,46)
ENTRY(alt_dtlb_miss)
	DBG_FAULT(4)
	MOV_FROM_IFA(r16)			// get address that caused the TLB miss
	movl r17=PAGE_KERNEL
	MOV_FROM_ISR(r20)
	movl r19=(((1 << IA64_MAX_PHYS_BITS) - 1) & ~0xfff)
	MOV_FROM_IPSR(p0, r21)
	mov r31=pr
	mov r24=PERCPU_ADDR
	;;
#ifdef CONFIG_DISABLE_VHPT
	shr.u r22=r16,61			// get the region number into r22
	;;
	cmp.gt p8,p0=6,r22			// access to region 0-5
	;;
	THASH(p8, r17, r16, r25)
	;;
	MOV_TO_IHA(p8, r17, r25)
(p8)	mov r29=b0				// save b0
(p8)	br.cond.dptk dtlb_fault
#endif
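	/*
	 * No VHPT entry was found, so build a translation directly:
	 *   - an address inside the per-CPU window (p10) maps to this CPU's
	 *     per-CPU data page, with the per-CPU page size in itir;
	 *   - other kernel identity-mapped addresses (p11) get an identity PTE,
	 *     uncached (ma=UC) if the access was to region 6 (p12);
	 *   - a faulting speculative load (p6) only gets psr.ed set so it can
	 *     retire with a NaT instead of inserting a translation;
	 *   - a user-level access (p8) is handed to page_fault.
	 */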
	cmp.ge p10,p11=r16,r24			// access to per_cpu_data?
	tbit.z p12,p0=r16,61			// access to region 6?
	mov r25=PERCPU_PAGE_SHIFT << 2
	mov r26=PERCPU_PAGE_SIZE
	nop.m 0
	nop.b 0
	;;
(p10)	mov r19=IA64_KR(PER_CPU_DATA)
(p11)	and r19=r19,r16				// clear non-ppn fields
	extr.u r23=r21,IA64_PSR_CPL0_BIT,2	// extract psr.cpl
	and r22=IA64_ISR_CODE_MASK,r20		// get the isr.code field
	tbit.nz p6,p7=r20,IA64_ISR_SP_BIT	// is speculation bit on?
	tbit.nz p9,p0=r20,IA64_ISR_NA_BIT	// is non-access bit on?
	;;
(p10)	sub r19=r19,r26
	MOV_TO_ITIR(p10, r25, r24)
	cmp.ne p8,p0=r0,r23
(p9)	cmp.eq.or.andcm p6,p7=IA64_ISR_CODE_LFETCH,r22	// check isr.code field
(p12)	dep r17=-1,r17,4,1			// set ma=UC for region 6 addr
(p8)	br.cond.spnt page_fault

	dep r21=-1,r21,IA64_PSR_ED_BIT,1
	;;
	or r19=r19,r17				// insert PTE control bits into r19
	MOV_TO_IPSR(p6, r21, r24)
	;;
	ITC_D(p7, r19, r18)			// insert the TLB entry
	mov pr=r31,-1
	RFI
END(alt_dtlb_miss)

	.org ia64_ivt+0x1400
/////////////////////////////////////////////////////////////////////////////////////////
// 0x1400 Entry 5 (size 64 bundles) Data nested TLB (6,45)
ENTRY(nested_dtlb_miss)
	/*
	 * In the absence of kernel bugs, we get here when the virtually mapped linear
	 * page table is accessed non-speculatively (e.g., in the Dirty-bit, Instruction
	 * Access-bit, or Data Access-bit faults).  If the DTLB entry for the virtual page
	 * table is missing, a nested TLB miss fault is triggered and control is
	 * transferred to this point.  When this happens, we look up the pte for the
	 * faulting address by walking the page table in physical mode and return to the
	 * continuation point passed in register r30 (or call page_fault if the address is
	 * not mapped).
	 *
	 *	Input:	r16:	faulting address
	 *		r29:	saved b0
	 *		r30:	continuation address
	 *		r31:	saved pr
	 *
	 *	Output:	r17:	physical address of PTE of faulting address
	 *		r29:	saved b0
	 *		r30:	continuation address
	 *		r31:	saved pr
	 *
	 *	Clobbered:	b0, r18, r19, r21, r22, psr.dt (cleared)
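	 *
	 * Note that psr.dt is left turned off, so the load re-executed at the
	 * continuation point picks up the PTE through the physical address
	 * returned in r17.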
	 */
	RSM_PSR_DT				// switch to using physical data addressing
	mov r19=IA64_KR(PT_BASE)		// get the page table base address
	shl r21=r16,3				// shift bit 60 into sign bit
	MOV_FROM_ITIR(r18)
	;;
	shr.u r17=r16,61			// get the region number into r17
	extr.u r18=r18,2,6			// get the faulting page size
	;;
	cmp.eq p6,p7=5,r17			// is faulting address in region 5?
	add r22=-PAGE_SHIFT,r18			// adjustment for hugetlb address
	add r18=PGDIR_SHIFT-PAGE_SHIFT,r18
	;;
	shr.u r22=r16,r22
	shr.u r18=r16,r18
(p7)	dep r17=r17,r19,(PAGE_SHIFT-3),3	// put region number bits in place

	srlz.d
	LOAD_PHYSICAL(p6, r19, swapper_pg_dir)	// region 5 is rooted at swapper_pg_dir

	.pred.rel "mutex", p6, p7
(p6)	shr.u r21=r21,PGDIR_SHIFT+PAGE_SHIFT
(p7)	shr.u r21=r21,PGDIR_SHIFT+PAGE_SHIFT-3
	;;
(p6)	dep r17=r18,r19,3,(PAGE_SHIFT-3)	// r17=pgd_offset for region 5
(p7)	dep r17=r18,r17,3,(PAGE_SHIFT-6)	// r17=pgd_offset for region[0-4]
	cmp.eq p7,p6=0,r21			// unused address bits all zeroes?
#if CONFIG_PGTABLE_LEVELS == 4
	shr.u r18=r22,PUD_SHIFT			// shift pud index into position
#else
	shr.u r18=r22,PMD_SHIFT			// shift pmd index into position
#endif
	;;
	ld8 r17=[r17]				// get *pgd (may be 0)
	;;
(p7)	cmp.eq p6,p7=r17,r0			// was pgd_present(*pgd) == NULL?
	dep r17=r18,r17,3,(PAGE_SHIFT-3)	// r17=p[u|m]d_offset(pgd,addr)
	;;
#if CONFIG_PGTABLE_LEVELS == 4
(p7)	ld8 r17=[r17]				// get *pud (may be 0)
	shr.u r18=r22,PMD_SHIFT			// shift pmd index into position
	;;
(p7)	cmp.eq.or.andcm p6,p7=r17,r0		// was pud_present(*pud) == NULL?
	dep r17=r18,r17,3,(PAGE_SHIFT-3)	// r17=pmd_offset(pud,addr)
	;;
#endif
(p7)	ld8 r17=[r17]				// get *pmd (may be 0)
	shr.u r19=r22,PAGE_SHIFT		// shift pte index into position
	;;
(p7)	cmp.eq.or.andcm p6,p7=r17,r0		// was pmd_present(*pmd) == NULL?
	dep r17=r19,r17,3,(PAGE_SHIFT-3)	// r17=pte_offset(pmd,addr)
(p6)	br.cond.spnt page_fault
	mov b0=r30
	br.sptk.many b0				// return to continuation point
END(nested_dtlb_miss)

	.org ia64_ivt+0x1800
/////////////////////////////////////////////////////////////////////////////////////////
// 0x1800 Entry 6 (size 64 bundles) Instruction Key Miss (24)
ENTRY(ikey_miss)
	DBG_FAULT(6)
	FAULT(6)
END(ikey_miss)

	.org ia64_ivt+0x1c00
/////////////////////////////////////////////////////////////////////////////////////////
// 0x1c00 Entry 7 (size 64 bundles) Data Key Miss (12,51)
ENTRY(dkey_miss)
	DBG_FAULT(7)
	FAULT(7)
END(dkey_miss)

	.org ia64_ivt+0x2000
/////////////////////////////////////////////////////////////////////////////////////////
// 0x2000 Entry 8 (size 64 bundles) Dirty-bit (54)
ENTRY(dirty_bit)
	DBG_FAULT(8)
	/*
	 * What we do here is to simply turn on the dirty bit in the PTE.  We need to
	 * update both the page-table and the TLB entry.  To efficiently access the PTE,
	 * we address it through the virtual page table.  Most likely, the TLB entry for
	 * the relevant virtual page table page is still present in the TLB so we can
	 * normally do this without additional TLB misses.  In case the necessary virtual
	 * page table TLB entry isn't present, we take a nested TLB miss, in which case we
	 * look up the physical address of the L3 PTE and then continue at label 1 below.
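	 *
	 * On SMP the update must not race with a concurrent ptc.g: the PTE is
	 * updated with cmpxchg8.acq against the value originally read, the TLB
	 * insert is done only if the exchange won, and the translation is purged
	 * again with ptc.l if the PTE is seen to have changed afterwards.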
	 */
	MOV_FROM_IFA(r16)			// get the address that caused the fault
	movl r30=1f				// load continuation point in case of nested fault
	;;
	THASH(p0, r17, r16, r18)		// compute virtual address of L3 PTE
	mov r29=b0				// save b0 in case of nested fault
	mov r31=pr				// save pr
#ifdef CONFIG_SMP
	mov r28=ar.ccv				// save ar.ccv
	;;
1:	ld8 r18=[r17]
	;;					// avoid RAW on r18
	mov ar.ccv=r18				// set compare value for cmpxchg
	or r25=_PAGE_D|_PAGE_A,r18		// set the dirty and accessed bits
	tbit.z p7,p6=r18,_PAGE_P_BIT		// Check present bit
	;;
(p6)	cmpxchg8.acq r26=[r17],r25,ar.ccv	// Only update if page is present
	mov r24=PAGE_SHIFT<<2
	;;
(p6)	cmp.eq p6,p7=r26,r18			// Only compare if page is present
	;;
	ITC_D(p6, r25, r18)			// install updated PTE
	;;
	/*
	 * Tell the assembler's dependency-violation checker that the above "itc" instructions
	 * cannot possibly affect the following loads:
	 */
	dv_serialize_data

	ld8 r18=[r17]				// read PTE again
	;;
	cmp.eq p6,p7=r18,r25			// is it same as the newly installed
	;;
(p7)	ptc.l r16,r24
	mov b0=r29				// restore b0
	mov ar.ccv=r28
#else
	;;
1:	ld8 r18=[r17]
	;;					// avoid RAW on r18
	or r18=_PAGE_D|_PAGE_A,r18		// set the dirty and accessed bits
	mov b0=r29				// restore b0
	;;
	st8 [r17]=r18				// store back updated PTE
	ITC_D(p0, r18, r16)			// install updated PTE
#endif
	mov pr=r31,-1				// restore pr
	RFI
END(dirty_bit)

	.org ia64_ivt+0x2400
/////////////////////////////////////////////////////////////////////////////////////////
// 0x2400 Entry 9 (size 64 bundles) Instruction Access-bit (27)
ENTRY(iaccess_bit)
	DBG_FAULT(9)
	// Like Entry 8, except for instruction access
	MOV_FROM_IFA(r16)			// get the address that caused the fault
	movl r30=1f				// load continuation point in case of nested fault
	mov r31=pr				// save predicates
#ifdef CONFIG_ITANIUM
	/*
	 * Erratum 10 (IFA may contain incorrect address) has "NoFix" status.
	 */
	MOV_FROM_IPSR(p0, r17)
	;;
	MOV_FROM_IIP(r18)
	tbit.z p6,p0=r17,IA64_PSR_IS_BIT	// IA64 instruction set?
	;;
(p6)	mov r16=r18				// if so, use cr.iip instead of cr.ifa
#endif /* CONFIG_ITANIUM */
	;;
	THASH(p0, r17, r16, r18)		// compute virtual address of L3 PTE
	mov r29=b0				// save b0 in case of nested fault
#ifdef CONFIG_SMP
	mov r28=ar.ccv				// save ar.ccv
	;;
1:	ld8 r18=[r17]
	;;
	mov ar.ccv=r18				// set compare value for cmpxchg
	or r25=_PAGE_A,r18			// set the accessed bit
	tbit.z p7,p6=r18,_PAGE_P_BIT		// Check present bit
	;;
(p6)	cmpxchg8.acq r26=[r17],r25,ar.ccv	// Only if page present
	mov r24=PAGE_SHIFT<<2
	;;
(p6)	cmp.eq p6,p7=r26,r18			// Only if page present
	;;
	ITC_I(p6, r25, r26)			// install updated PTE
	;;
	/*
	 * Tell the assembler's dependency-violation checker that the above "itc" instructions
	 * cannot possibly affect the following loads:
	 */
	dv_serialize_data

	ld8 r18=[r17]				// read PTE again
	;;
	cmp.eq p6,p7=r18,r25			// is it same as the newly installed
	;;
(p7)	ptc.l r16,r24
	mov b0=r29				// restore b0
	mov ar.ccv=r28
#else /* !CONFIG_SMP */
	;;
1:	ld8 r18=[r17]
	;;
	or r18=_PAGE_A,r18			// set the accessed bit
	mov b0=r29				// restore b0
	;;
	st8 [r17]=r18				// store back updated PTE
	ITC_I(p0, r18, r16)			// install updated PTE
#endif /* !CONFIG_SMP */
	mov pr=r31,-1
	RFI
END(iaccess_bit)

	.org ia64_ivt+0x2800
/////////////////////////////////////////////////////////////////////////////////////////
// 0x2800 Entry 10 (size 64 bundles) Data Access-bit (15,55)
ENTRY(daccess_bit)
	DBG_FAULT(10)
	// Like Entry 8, except for data access
	MOV_FROM_IFA(r16)			// get the address that caused the fault
	movl r30=1f				// load continuation point in case of nested fault
	;;
	THASH(p0, r17, r16, r18)		// compute virtual address of L3 PTE
	mov r31=pr
	mov r29=b0				// save b0 in case of nested fault
#ifdef CONFIG_SMP
	mov r28=ar.ccv				// save ar.ccv
	;;
1:	ld8 r18=[r17]
	;;					// avoid RAW on r18
	mov ar.ccv=r18				// set compare value for cmpxchg
	or r25=_PAGE_A,r18			// set the accessed bit
	tbit.z p7,p6=r18,_PAGE_P_BIT		// Check present bit
	;;
(p6)	cmpxchg8.acq r26=[r17],r25,ar.ccv	// Only if page is present
	mov r24=PAGE_SHIFT<<2
	;;
(p6)	cmp.eq p6,p7=r26,r18			// Only if page is present
	;;
	ITC_D(p6, r25, r26)			// install updated PTE
	/*
	 * Tell the assembler's dependency-violation checker that the above "itc" instructions
	 * cannot possibly affect the following loads:
	 */
	dv_serialize_data
	;;
	ld8 r18=[r17]				// read PTE again
	;;
	cmp.eq p6,p7=r18,r25			// is it same as the newly installed
	;;
(p7)	ptc.l r16,r24
	mov ar.ccv=r28
#else
	;;
1:	ld8 r18=[r17]
	;;					// avoid RAW on r18
	or r18=_PAGE_A,r18			// set the accessed bit
	;;
	st8 [r17]=r18				// store back updated PTE
	ITC_D(p0, r18, r16)			// install updated PTE
#endif
	mov b0=r29				// restore b0
	mov pr=r31,-1
	RFI
END(daccess_bit)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 708)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 709) .org ia64_ivt+0x2c00
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 710) /////////////////////////////////////////////////////////////////////////////////////////
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 711) // 0x2c00 Entry 11 (size 64 bundles) Break instruction (33)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 712) ENTRY(break_fault)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 713) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 714) * The streamlined system call entry/exit paths only save/restore the initial part
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 715) * of pt_regs. This implies that the callers of system-calls must adhere to the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 716) * normal procedure calling conventions.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 717) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 718) * Registers to be saved & restored:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 719) * CR registers: cr.ipsr, cr.iip, cr.ifs
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 720) * AR registers: ar.unat, ar.pfs, ar.rsc, ar.rnat, ar.bspstore, ar.fpsr
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 721) * others: pr, b0, b6, loadrs, r1, r11, r12, r13, r15
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 722) * Registers to be restored only:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 723) * r8-r11: output value from the system call.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 724) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 725) * During system call exit, scratch registers (including r15) are modified/cleared
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 726) * to prevent leaking bits from kernel to user level.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 727) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 728) DBG_FAULT(11)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 729) mov.m r16=IA64_KR(CURRENT) // M2 r16 <- current task (12 cyc)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 730) MOV_FROM_IPSR(p0, r29) // M2 (12 cyc)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 731) mov r31=pr // I0 (2 cyc)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 732)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 733) MOV_FROM_IIM(r17) // M2 (2 cyc)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 734) mov.m r27=ar.rsc // M2 (12 cyc)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 735) mov r18=__IA64_BREAK_SYSCALL // A
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 736)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 737) mov.m ar.rsc=0 // M2
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 738) mov.m r21=ar.fpsr // M2 (12 cyc)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 739) mov r19=b6 // I0 (2 cyc)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 740) ;;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 741) mov.m r23=ar.bspstore // M2 (12 cyc)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 742) mov.m r24=ar.rnat // M2 (5 cyc)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 743) mov.i r26=ar.pfs // I0 (2 cyc)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 744)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 745) invala // M0|1
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 746) nop.m 0 // M
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 747) mov r20=r1 // A save r1
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 748)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 749) nop.m 0
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 750) movl r30=sys_call_table // X
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 751)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 752) MOV_FROM_IIP(r28) // M2 (2 cyc)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 753) cmp.eq p0,p7=r18,r17 // I0 is this a system call?
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 754) (p7) br.cond.spnt non_syscall // B no ->
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 755) //
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 756) // From this point on, we are definitely on the syscall-path
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 757) // and we can use (non-banked) scratch registers.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 758) //
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 759) ///////////////////////////////////////////////////////////////////////
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 760) mov r1=r16 // A move task-pointer to "addl"-addressable reg
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 761) mov r2=r16 // A setup r2 for ia64_syscall_setup
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 762) add r9=TI_FLAGS+IA64_TASK_SIZE,r16 // A r9 = &current_thread_info()->flags
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 763)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 764) adds r16=IA64_TASK_THREAD_ON_USTACK_OFFSET,r16
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 765) adds r15=-1024,r15 // A subtract 1024 from syscall number
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 766) mov r3=NR_syscalls - 1
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 767) ;;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 768) ld1.bias r17=[r16] // M0|1 r17 = current->thread.on_ustack flag
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 769) ld4 r9=[r9] // M0|1 r9 = current_thread_info()->flags
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 770) extr.u r8=r29,41,2 // I0 extract ei field from cr.ipsr
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 771)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 772) shladd r30=r15,3,r30 // A r30 = sys_call_table + 8*(syscall-1024)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 773) addl r22=IA64_RBS_OFFSET,r1 // A compute base of RBS
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 774) cmp.leu p6,p7=r15,r3 // A syscall number in range?
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 775) ;;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 776)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 777) lfetch.fault.excl.nt1 [r22] // M0|1 prefetch RBS
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 778) (p6) ld8 r30=[r30] // M0|1 load address of syscall entry point
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 779) tnat.nz.or p7,p0=r15 // I0 is syscall nr a NaT?
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 780)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 781) mov.m ar.bspstore=r22 // M2 switch to kernel RBS
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 782) cmp.eq p8,p9=2,r8 // A ipsr.ei==2?
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 783) ;;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 784)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 785) (p8) mov r8=0 // A clear ei to 0
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 786) (p7) movl r30=sys_ni_syscall // X
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 787)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 788) (p8) adds r28=16,r28 // A switch cr.iip to next bundle
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 789) (p9) adds r8=1,r8 // A increment ei to next slot
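	//
	// Hedged C-level sketch of the dispatch decision made above (the
	// names "nr" and "handler" are illustrative only):
	//
	//	nr = r15 - 1024;                        // __NR_* values are 1024-based
	//	if ((unsigned long)nr <= NR_syscalls - 1 && !NaT(r15))
	//		handler = sys_call_table[nr];   // loaded into r30 above
	//	else
	//		handler = sys_ni_syscall;
	//
	// In parallel, cr.iip/ipsr.ei are advanced (p8/p9) so that the
	// eventual rfi resumes at the instruction following the break.
	//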
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 790) #ifdef CONFIG_VIRT_CPU_ACCOUNTING_NATIVE
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 791) ;;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 792) mov b6=r30 // I0 setup syscall handler branch reg early
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 793) #else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 794) nop.i 0
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 795) ;;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 796) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 797)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 798) mov.m r25=ar.unat // M2 (5 cyc)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 799) dep r29=r8,r29,41,2 // I0 insert new ei into cr.ipsr
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 800) adds r15=1024,r15 // A restore original syscall number
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 801) //
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 802) // If any of the above loads miss in L1D, we'll stall here until
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 803) // the data arrives.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 804) //
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 805) ///////////////////////////////////////////////////////////////////////
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 806) st1 [r16]=r0 // M2|3 clear current->thread.on_ustack flag
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 807) #ifdef CONFIG_VIRT_CPU_ACCOUNTING_NATIVE
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 808) MOV_FROM_ITC(p0, p14, r30, r18) // M get cycle for accounting
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 809) #else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 810) mov b6=r30 // I0 setup syscall handler branch reg early
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 811) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 812) cmp.eq pKStk,pUStk=r0,r17 // A were we on kernel stacks already?
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 813)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 814) and r9=_TIF_SYSCALL_TRACEAUDIT,r9 // A mask trace or audit
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 815) mov r18=ar.bsp // M2 (12 cyc)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 816) (pKStk) br.cond.spnt .break_fixup // B we're already in kernel-mode -- fix up RBS
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 817) ;;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 818) .back_from_break_fixup:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 819) (pUStk) addl r1=IA64_STK_OFFSET-IA64_PT_REGS_SIZE,r1 // A compute base of memory stack
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 820) cmp.eq p14,p0=r9,r0 // A are syscalls being traced/audited?
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 821) br.call.sptk.many b7=ia64_syscall_setup // B
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 822) 1:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 823) #ifdef CONFIG_VIRT_CPU_ACCOUNTING_NATIVE
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 824) // ar.itc was already read into r30 above (MOV_FROM_ITC), and r13 is current
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 825) add r16=TI_AC_STAMP+IA64_TASK_SIZE,r13 // A
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 826) add r17=TI_AC_LEAVE+IA64_TASK_SIZE,r13 // A
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 827) (pKStk) br.cond.spnt .skip_accounting // B unlikely skip
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 828) ;;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 829) ld8 r18=[r16],TI_AC_STIME-TI_AC_STAMP // M get last stamp
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 830) ld8 r19=[r17],TI_AC_UTIME-TI_AC_LEAVE // M time at leave
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 831) ;;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 832) ld8 r20=[r16],TI_AC_STAMP-TI_AC_STIME // M cumulated stime
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 833) ld8 r21=[r17] // M cumulated utime
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 834) sub r22=r19,r18 // A stime before leave
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 835) ;;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 836) st8 [r16]=r30,TI_AC_STIME-TI_AC_STAMP // M update stamp
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 837) sub r18=r30,r19 // A elapsed time in user
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 838) ;;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 839) add r20=r20,r22 // A sum stime
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 840) add r21=r21,r18 // A sum utime
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 841) ;;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 842) st8 [r16]=r20 // M update stime
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 843) st8 [r17]=r21 // M update utime
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 844) ;;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 845) .skip_accounting:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 846) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 847) mov ar.rsc=0x3 // M2 set eager mode, pl 0, LE, loadrs=0
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 848) nop 0
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 849) BSW_1(r2, r14) // B (6 cyc) regs are saved, switch to bank 1
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 850) ;;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 851)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 852) SSM_PSR_IC_AND_DEFAULT_BITS_AND_SRLZ_I(r3, r16) // M2 now it's safe to re-enable intr.-collection
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 853) // M0 ensure interruption collection is on
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 854) movl r3=ia64_ret_from_syscall // X
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 855) ;;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 856) mov rp=r3 // I0 set the real return addr
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 857) (p10) br.cond.spnt.many ia64_ret_from_syscall // B return if bad call-frame or r15 is a NaT
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 858)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 859) SSM_PSR_I(p15, p15, r16) // M2 restore psr.i
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 860) (p14) br.call.sptk.many b6=b6 // B invoke syscall-handler (ignore return addr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 861) br.cond.spnt.many ia64_trace_syscall // B do syscall-tracing thingamajig
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 862) // NOT REACHED
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 863) ///////////////////////////////////////////////////////////////////////
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 864) // On entry, we optimistically assumed that we're coming from user-space.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 865) // For the rare cases where a system-call is done from within the kernel,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 866) // we fix things up at this point:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 867) .break_fixup:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 868) add r1=-IA64_PT_REGS_SIZE,sp // A allocate space for pt_regs structure
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 869) mov ar.rnat=r24 // M2 restore kernel's AR.RNAT
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 870) ;;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 871) mov ar.bspstore=r23 // M2 restore kernel's AR.BSPSTORE
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 872) br.cond.sptk .back_from_break_fixup
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 873) END(break_fault)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 874)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 875) .org ia64_ivt+0x3000
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 876) /////////////////////////////////////////////////////////////////////////////////////////
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 877) // 0x3000 Entry 12 (size 64 bundles) External Interrupt (4)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 878) ENTRY(interrupt)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 879) /* interrupt handler has become too big to fit this area. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 880) br.sptk.many __interrupt
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 881) END(interrupt)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 882)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 883) .org ia64_ivt+0x3400
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 884) /////////////////////////////////////////////////////////////////////////////////////////
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 885) // 0x3400 Entry 13 (size 64 bundles) Reserved
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 886) DBG_FAULT(13)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 887) FAULT(13)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 888)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 889) .org ia64_ivt+0x3800
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 890) /////////////////////////////////////////////////////////////////////////////////////////
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 891) // 0x3800 Entry 14 (size 64 bundles) Reserved
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 892) DBG_FAULT(14)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 893) FAULT(14)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 894)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 895) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 896) * There is no particular reason for this code to be here, other than that
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 897) * there happens to be space here that would go unused otherwise. If this
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 898) * fault ever gets "unreserved", simply move the following code to a more
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 899) * suitable spot...
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 900) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 901) * ia64_syscall_setup() is a separate subroutine so that it can
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 902) * allocate stacked registers and safely demine any potential
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 903) * NaT values from the input registers.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 904) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 905) * On entry:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 906) * - executing on bank 0 or bank 1 register set (doesn't matter)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 907) * - r1: stack pointer
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 908) * - r2: current task pointer
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 909) * - r3: preserved
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 910) * - r11: original contents (saved ar.pfs to be saved)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 911) * - r12: original contents (sp to be saved)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 912) * - r13: original contents (tp to be saved)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 913) * - r15: original contents (syscall # to be saved)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 914) * - r18: saved bsp (after switching to kernel stack)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 915) * - r19: saved b6
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 916) * - r20: saved r1 (gp)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 917) * - r21: saved ar.fpsr
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 918) * - r22: kernel's register backing store base (krbs_base)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 919) * - r23: saved ar.bspstore
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 920) * - r24: saved ar.rnat
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 921) * - r25: saved ar.unat
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 922) * - r26: saved ar.pfs
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 923) * - r27: saved ar.rsc
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 924) * - r28: saved cr.iip
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 925) * - r29: saved cr.ipsr
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 926) * - r30: ar.itc for accounting (don't touch)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 927) * - r31: saved pr
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 928) * - b0: original contents (to be saved)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 929) * On exit:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 930) * - p10: TRUE if syscall is invoked with more than 8 out
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 931) * registers or r15's NaT bit is set
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 932) * - r1: kernel's gp
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 933) * - r3: preserved (same as on entry)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 934) * - r8: -EINVAL if p10 is true
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 935) * - r12: points to kernel stack
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 936) * - r13: points to current task
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 937) * - r14: preserved (same as on entry)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 938) * - p13: preserved
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 939) * - p15: TRUE if interrupts need to be re-enabled
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 940) * - ar.fpsr: set to kernel settings
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 941) * - b6: preserved (same as on entry)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 942) */
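/*
 * A sketch of how the p10/-EINVAL condition listed above is derived from
 * ar.pfs below ("sol"/"sof" are the usual size-of-locals/size-of-frame
 * fields; the C names are illustrative only):
 *
 *	sof = pfs & 0x7f;                   // total register frame size
 *	sol = (pfs >> 7) & 0x7f;            // inputs + locals
 *	p10 = (sof > sol + 8) || NaT(r15);  // >8 outs, or NaT syscall number
 *	if (p10) r8 = -EINVAL;
 */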
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 943) GLOBAL_ENTRY(ia64_syscall_setup)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 944) #if PT(B6) != 0
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 945) # error This code assumes that b6 is the first field in pt_regs.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 946) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 947) st8 [r1]=r19 // save b6
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 948) add r16=PT(CR_IPSR),r1 // initialize first base pointer
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 949) add r17=PT(R11),r1 // initialize second base pointer
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 950) ;;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 951) alloc r19=ar.pfs,8,0,0,0 // ensure in0-in7 are writable
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 952) st8 [r16]=r29,PT(AR_PFS)-PT(CR_IPSR) // save cr.ipsr
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 953) tnat.nz p8,p0=in0
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 954)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 955) st8.spill [r17]=r11,PT(CR_IIP)-PT(R11) // save r11
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 956) tnat.nz p9,p0=in1
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 957) (pKStk) mov r18=r0 // make sure r18 isn't NaT
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 958) ;;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 959)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 960) st8 [r16]=r26,PT(CR_IFS)-PT(AR_PFS) // save ar.pfs
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 961) st8 [r17]=r28,PT(AR_UNAT)-PT(CR_IIP) // save cr.iip
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 962) mov r28=b0 // save b0 (2 cyc)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 963) ;;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 964)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 965) st8 [r17]=r25,PT(AR_RSC)-PT(AR_UNAT) // save ar.unat
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 966) dep r19=0,r19,38,26 // clear all bits but 0..37 [I0]
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 967) (p8) mov in0=-1
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 968) ;;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 969)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 970) st8 [r16]=r19,PT(AR_RNAT)-PT(CR_IFS) // store ar.pfs.pfm in cr.ifs
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 971) extr.u r11=r19,7,7 // I0 // get sol of ar.pfs
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 972) and r8=0x7f,r19 // A // get sof of ar.pfs
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 973)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 974) st8 [r17]=r27,PT(AR_BSPSTORE)-PT(AR_RSC)// save ar.rsc
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 975) tbit.nz p15,p0=r29,IA64_PSR_I_BIT // I0
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 976) (p9) mov in1=-1
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 977) ;;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 978)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 979) (pUStk) sub r18=r18,r22 // r18=RSE.ndirty*8
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 980) tnat.nz p10,p0=in2
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 981) add r11=8,r11
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 982) ;;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 983) (pKStk) adds r16=PT(PR)-PT(AR_RNAT),r16 // skip over ar_rnat field
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 984) (pKStk) adds r17=PT(B0)-PT(AR_BSPSTORE),r17 // skip over ar_bspstore field
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 985) tnat.nz p11,p0=in3
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 986) ;;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 987) (p10) mov in2=-1
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 988) tnat.nz p12,p0=in4 // [I0]
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 989) (p11) mov in3=-1
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 990) ;;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 991) (pUStk) st8 [r16]=r24,PT(PR)-PT(AR_RNAT) // save ar.rnat
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 992) (pUStk) st8 [r17]=r23,PT(B0)-PT(AR_BSPSTORE) // save ar.bspstore
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 993) shl r18=r18,16 // compute ar.rsc to be used for "loadrs"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 994) ;;
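	//
	// Sketch of the "loadrs" value just computed (assuming the usual
	// ar.rsc layout, where loadrs occupies bits 16..29): r18 holds the
	// size in bytes of the user's dirty RSE partition, so
	//
	//	pt_regs->loadrs = (ar.bsp - kernel_rbs_base) << 16;
	//
	// tells the exit path how much of that partition to restore.
	//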
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 995) st8 [r16]=r31,PT(LOADRS)-PT(PR) // save predicates
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 996) st8 [r17]=r28,PT(R1)-PT(B0) // save b0
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 997) tnat.nz p13,p0=in5 // [I0]
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 998) ;;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 999) st8 [r16]=r18,PT(R12)-PT(LOADRS) // save ar.rsc value for "loadrs"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1000) st8.spill [r17]=r20,PT(R13)-PT(R1) // save original r1
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1001) (p12) mov in4=-1
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1002) ;;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1003)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1004) .mem.offset 0,0; st8.spill [r16]=r12,PT(AR_FPSR)-PT(R12) // save r12
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1005) .mem.offset 8,0; st8.spill [r17]=r13,PT(R15)-PT(R13) // save r13
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1006) (p13) mov in5=-1
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1007) ;;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1008) st8 [r16]=r21,PT(R8)-PT(AR_FPSR) // save ar.fpsr
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1009) tnat.nz p13,p0=in6
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1010) cmp.lt p10,p9=r11,r8 // frame size can't be more than local+8
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1011) ;;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1012) mov r8=1
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1013) (p9) tnat.nz p10,p0=r15
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1014) adds r12=-16,r1 // switch to kernel memory stack (with 16 bytes of scratch)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1015)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1016) st8.spill [r17]=r15 // save r15
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1017) tnat.nz p8,p0=in7
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1018) nop.i 0
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1019)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1020) mov r13=r2 // establish `current'
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1021) movl r1=__gp // establish kernel global pointer
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1022) ;;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1023) st8 [r16]=r8 // ensure pt_regs.r8 != 0 (see handle_syscall_error)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1024) (p13) mov in6=-1
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1025) (p8) mov in7=-1
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1026)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1027) cmp.eq pSys,pNonSys=r0,r0 // set pSys=1, pNonSys=0
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1028) movl r17=FPSR_DEFAULT
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1029) ;;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1030) mov.m ar.fpsr=r17 // set ar.fpsr to kernel default value
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1031) (p10) mov r8=-EINVAL
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1032) br.ret.sptk.many b7
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1033) END(ia64_syscall_setup)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1034)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1035) .org ia64_ivt+0x3c00
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1036) /////////////////////////////////////////////////////////////////////////////////////////
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1037) // 0x3c00 Entry 15 (size 64 bundles) Reserved
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1038) DBG_FAULT(15)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1039) FAULT(15)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1040)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1041) .org ia64_ivt+0x4000
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1042) /////////////////////////////////////////////////////////////////////////////////////////
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1043) // 0x4000 Entry 16 (size 64 bundles) Reserved
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1044) DBG_FAULT(16)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1045) FAULT(16)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1046)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1047) #if defined(CONFIG_VIRT_CPU_ACCOUNTING_NATIVE)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1048) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1049) * There is no particular reason for this code to be here, other than
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1050) * that there happens to be space here that would go unused otherwise.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1051) * If this fault ever gets "unreserved", simply move the following
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1052) * code to a more suitable spot...
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1053) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1054) * account_sys_enter is called from SAVE_MIN* macros if accounting is
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1055) * enabled and if the macro is entered from user mode.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1056) */
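/*
 * A minimal C sketch of the update performed below (the inline accounting
 * block in break_fault follows the same pattern); the ti->ac_* names just
 * mirror the TI_AC_* offsets used here:
 *
 *	now = ar.itc;                                  // read by the caller
 *	ti->ac_stime += ti->ac_leave - ti->ac_stamp;   // time spent in kernel
 *	ti->ac_utime += now - ti->ac_leave;            // time spent in user
 *	ti->ac_stamp  = now;
 */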
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1057) GLOBAL_ENTRY(account_sys_enter)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1058) // ar.itc was already read into r20 by the caller (SAVE_MIN*), and r13 is current
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1059) add r16=TI_AC_STAMP+IA64_TASK_SIZE,r13
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1060) add r17=TI_AC_LEAVE+IA64_TASK_SIZE,r13
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1061) ;;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1062) ld8 r18=[r16],TI_AC_STIME-TI_AC_STAMP // time at last check in kernel
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1063) ld8 r19=[r17],TI_AC_UTIME-TI_AC_LEAVE // time when we last left the kernel
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1064) ;;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1065) ld8 r23=[r16],TI_AC_STAMP-TI_AC_STIME // cumulated stime
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1066) ld8 r21=[r17] // cumulated utime
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1067) sub r22=r19,r18 // stime before leaving the kernel
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1068) ;;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1069) st8 [r16]=r20,TI_AC_STIME-TI_AC_STAMP // update stamp
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1070) sub r18=r20,r19 // elapsed time in user mode
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1071) ;;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1072) add r23=r23,r22 // sum stime
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1073) add r21=r21,r18 // sum utime
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1074) ;;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1075) st8 [r16]=r23 // update stime
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1076) st8 [r17]=r21 // update utime
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1077) ;;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1078) br.ret.sptk.many rp
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1079) END(account_sys_enter)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1080) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1081)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1082) .org ia64_ivt+0x4400
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1083) /////////////////////////////////////////////////////////////////////////////////////////
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1084) // 0x4400 Entry 17 (size 64 bundles) Reserved
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1085) DBG_FAULT(17)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1086) FAULT(17)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1087)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1088) .org ia64_ivt+0x4800
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1089) /////////////////////////////////////////////////////////////////////////////////////////
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1090) // 0x4800 Entry 18 (size 64 bundles) Reserved
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1091) DBG_FAULT(18)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1092) FAULT(18)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1093)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1094) .org ia64_ivt+0x4c00
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1095) /////////////////////////////////////////////////////////////////////////////////////////
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1096) // 0x4c00 Entry 19 (size 64 bundles) Reserved
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1097) DBG_FAULT(19)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1098) FAULT(19)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1099)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1100) //
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1101) // --- End of long entries, Beginning of short entries
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1102) //
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1103)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1104) .org ia64_ivt+0x5000
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1105) /////////////////////////////////////////////////////////////////////////////////////////
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1106) // 0x5000 Entry 20 (size 16 bundles) Page Not Present (10,22,49)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1107) ENTRY(page_not_present)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1108) DBG_FAULT(20)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1109) MOV_FROM_IFA(r16)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1110) RSM_PSR_DT
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1111) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1112) * The Linux page fault handler doesn't expect non-present pages to be in
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1113) * the TLB. Flush the existing entry now, so we meet that expectation.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1114) */
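	/*
	 * (The purge size is encoded the usual IA-64 way: log2(page size) in
	 * bits 7:2 of the second operand, hence r17 = PAGE_SHIFT << 2.)
	 */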
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1115) mov r17=PAGE_SHIFT<<2
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1116) ;;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1117) ptc.l r16,r17
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1118) ;;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1119) mov r31=pr
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1120) srlz.d
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1121) br.sptk.many page_fault
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1122) END(page_not_present)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1123)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1124) .org ia64_ivt+0x5100
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1125) /////////////////////////////////////////////////////////////////////////////////////////
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1126) // 0x5100 Entry 21 (size 16 bundles) Key Permission (13,25,52)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1127) ENTRY(key_permission)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1128) DBG_FAULT(21)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1129) MOV_FROM_IFA(r16)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1130) RSM_PSR_DT
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1131) mov r31=pr
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1132) ;;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1133) srlz.d
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1134) br.sptk.many page_fault
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1135) END(key_permission)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1136)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1137) .org ia64_ivt+0x5200
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1138) /////////////////////////////////////////////////////////////////////////////////////////
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1139) // 0x5200 Entry 22 (size 16 bundles) Instruction Access Rights (26)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1140) ENTRY(iaccess_rights)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1141) DBG_FAULT(22)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1142) MOV_FROM_IFA(r16)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1143) RSM_PSR_DT
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1144) mov r31=pr
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1145) ;;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1146) srlz.d
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1147) br.sptk.many page_fault
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1148) END(iaccess_rights)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1149)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1150) .org ia64_ivt+0x5300
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1151) /////////////////////////////////////////////////////////////////////////////////////////
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1152) // 0x5300 Entry 23 (size 16 bundles) Data Access Rights (14,53)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1153) ENTRY(daccess_rights)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1154) DBG_FAULT(23)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1155) MOV_FROM_IFA(r16)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1156) RSM_PSR_DT
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1157) mov r31=pr
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1158) ;;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1159) srlz.d
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1160) br.sptk.many page_fault
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1161) END(daccess_rights)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1162)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1163) .org ia64_ivt+0x5400
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1164) /////////////////////////////////////////////////////////////////////////////////////////
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1165) // 0x5400 Entry 24 (size 16 bundles) General Exception (5,32,34,36,38,39)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1166) ENTRY(general_exception)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1167) DBG_FAULT(24)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1168) MOV_FROM_ISR(r16)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1169) mov r31=pr
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1170) ;;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1171) cmp4.eq p6,p0=0,r16
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1172) (p6) br.sptk.many dispatch_illegal_op_fault
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1173) ;;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1174) mov r19=24 // fault number
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1175) br.sptk.many dispatch_to_fault_handler
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1176) END(general_exception)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1177)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1178) .org ia64_ivt+0x5500
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1179) /////////////////////////////////////////////////////////////////////////////////////////
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1180) // 0x5500 Entry 25 (size 16 bundles) Disabled FP-Register (35)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1181) ENTRY(disabled_fp_reg)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1182) DBG_FAULT(25)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1183) rsm psr.dfh // ensure we can access fph
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1184) ;;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1185) srlz.d
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1186) mov r31=pr
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1187) mov r19=25
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1188) br.sptk.many dispatch_to_fault_handler
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1189) END(disabled_fp_reg)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1190)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1191) .org ia64_ivt+0x5600
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1192) /////////////////////////////////////////////////////////////////////////////////////////
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1193) // 0x5600 Entry 26 (size 16 bundles) Nat Consumption (11,23,37,50)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1194) ENTRY(nat_consumption)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1195) DBG_FAULT(26)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1196)
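	/*
	 * Sketch of the decision below: a NaT consumed by an lfetch (cr.isr.na
	 * set and cr.isr.code == LFETCH) is handled by setting psr.ed and
	 * rfi-ing back, so the lfetch effectively retires as a no-op; anything
	 * else is a genuine NaT-consumption fault.
	 *
	 *	if (isr.na && (isr.code & 0xf) == IA64_ISR_CODE_LFETCH) {
	 *		ipsr.ed = 1;		// defer the exception -> skip it
	 *		rfi;
	 *	} else
	 *		FAULT(26);
	 */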
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1197) MOV_FROM_IPSR(p0, r16)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1198) MOV_FROM_ISR(r17)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1199) mov r31=pr // save PR
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1200) ;;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1201) and r18=0xf,r17 // r18 = cr.isr.code{3:0}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1202) tbit.z p6,p0=r17,IA64_ISR_NA_BIT
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1203) ;;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1204) cmp.ne.or p6,p0=IA64_ISR_CODE_LFETCH,r18
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1205) dep r16=-1,r16,IA64_PSR_ED_BIT,1
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1206) (p6) br.cond.spnt 1f // branch if (cr.isr.na == 0 || cr.isr.code{3:0} != LFETCH)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1207) ;;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1208) MOV_TO_IPSR(p0, r16, r18)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1209) mov pr=r31,-1
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1210) ;;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1211) RFI
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1212)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1213) 1: mov pr=r31,-1
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1214) ;;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1215) FAULT(26)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1216) END(nat_consumption)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1217)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1218) .org ia64_ivt+0x5700
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1219) /////////////////////////////////////////////////////////////////////////////////////////
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1220) // 0x5700 Entry 27 (size 16 bundles) Speculation (40)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1221) ENTRY(speculation_vector)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1222) DBG_FAULT(27)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1223) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1224) * A [f]chk.[as] instruction needs to take the branch to the recovery code but
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1225) * this part of the architecture is not implemented in hardware on some CPUs, such
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1226) * as Itanium. Thus, in general we need to emulate the behavior. IIM contains
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1227) * the relative target (not yet sign extended). So after sign extending it we
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1228) * simply add it to IIP. We also need to reset the EI field of the IPSR to zero,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1229) * i.e., the slot to restart into.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1230) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1231) * cr.iim contains zero_ext(imm21)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1232) */
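	/*
	 * Worked out (shr is the signed/arithmetic shift, so the pair of
	 * shifts below sign-extends imm21 and scales it to 16-byte bundles):
	 *
	 *	disp = ((s64)(iim << 43)) >> 39;	// sign_ext(imm21) * 16
	 *	iip += disp;
	 *	ipsr.ei = 0;				// restart at slot 0
	 */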
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1233) MOV_FROM_IIM(r18)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1234) ;;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1235) MOV_FROM_IIP(r17)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1236) shl r18=r18,43 // put sign bit in position (43=64-21)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1237) ;;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1238)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1239) MOV_FROM_IPSR(p0, r16)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1240) shr r18=r18,39 // sign extend (39=43-4)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1241) ;;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1242)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1243) add r17=r17,r18 // now add the offset
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1244) ;;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1245) MOV_TO_IIP(r17, r19)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1246) dep r16=0,r16,41,2 // clear EI
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1247) ;;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1248)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1249) MOV_TO_IPSR(p0, r16, r19)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1250) ;;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1251)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1252) RFI
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1253) END(speculation_vector)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1254)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1255) .org ia64_ivt+0x5800
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1256) /////////////////////////////////////////////////////////////////////////////////////////
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1257) // 0x5800 Entry 28 (size 16 bundles) Reserved
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1258) DBG_FAULT(28)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1259) FAULT(28)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1260)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1261) .org ia64_ivt+0x5900
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1262) /////////////////////////////////////////////////////////////////////////////////////////
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1263) // 0x5900 Entry 29 (size 16 bundles) Debug (16,28,56)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1264) ENTRY(debug_vector)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1265) DBG_FAULT(29)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1266) FAULT(29)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1267) END(debug_vector)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1268)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1269) .org ia64_ivt+0x5a00
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1270) /////////////////////////////////////////////////////////////////////////////////////////
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1271) // 0x5a00 Entry 30 (size 16 bundles) Unaligned Reference (57)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1272) ENTRY(unaligned_access)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1273) DBG_FAULT(30)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1274) mov r31=pr // prepare to save predicates
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1275) ;;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1276) br.sptk.many dispatch_unaligned_handler
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1277) END(unaligned_access)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1278)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1279) .org ia64_ivt+0x5b00
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1280) /////////////////////////////////////////////////////////////////////////////////////////
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1281) // 0x5b00 Entry 31 (size 16 bundles) Unsupported Data Reference (57)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1282) ENTRY(unsupported_data_reference)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1283) DBG_FAULT(31)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1284) FAULT(31)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1285) END(unsupported_data_reference)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1286)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1287) .org ia64_ivt+0x5c00
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1288) /////////////////////////////////////////////////////////////////////////////////////////
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1289) // 0x5c00 Entry 32 (size 16 bundles) Floating-Point Fault (64)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1290) ENTRY(floating_point_fault)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1291) DBG_FAULT(32)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1292) FAULT(32)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1293) END(floating_point_fault)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1294)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1295) .org ia64_ivt+0x5d00
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1296) /////////////////////////////////////////////////////////////////////////////////////////
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1297) // 0x5d00 Entry 33 (size 16 bundles) Floating Point Trap (66)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1298) ENTRY(floating_point_trap)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1299) DBG_FAULT(33)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1300) FAULT(33)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1301) END(floating_point_trap)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1302)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1303) .org ia64_ivt+0x5e00
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1304) /////////////////////////////////////////////////////////////////////////////////////////
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1305) // 0x5e00 Entry 34 (size 16 bundles) Lower Privilege Transfer Trap (66)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1306) ENTRY(lower_privilege_trap)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1307) DBG_FAULT(34)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1308) FAULT(34)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1309) END(lower_privilege_trap)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1310)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1311) .org ia64_ivt+0x5f00
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1312) /////////////////////////////////////////////////////////////////////////////////////////
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1313) // 0x5f00 Entry 35 (size 16 bundles) Taken Branch Trap (68)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1314) ENTRY(taken_branch_trap)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1315) DBG_FAULT(35)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1316) FAULT(35)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1317) END(taken_branch_trap)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1318)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1319) .org ia64_ivt+0x6000
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1320) /////////////////////////////////////////////////////////////////////////////////////////
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1321) // 0x6000 Entry 36 (size 16 bundles) Single Step Trap (69)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1322) ENTRY(single_step_trap)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1323) DBG_FAULT(36)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1324) FAULT(36)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1325) END(single_step_trap)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1326)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1327) .org ia64_ivt+0x6100
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1328) /////////////////////////////////////////////////////////////////////////////////////////
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1329) // 0x6100 Entry 37 (size 16 bundles) Reserved
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1330) DBG_FAULT(37)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1331) FAULT(37)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1332)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1333) .org ia64_ivt+0x6200
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1334) /////////////////////////////////////////////////////////////////////////////////////////
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1335) // 0x6200 Entry 38 (size 16 bundles) Reserved
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1336) DBG_FAULT(38)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1337) FAULT(38)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1338)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1339) .org ia64_ivt+0x6300
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1340) /////////////////////////////////////////////////////////////////////////////////////////
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1341) // 0x6300 Entry 39 (size 16 bundles) Reserved
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1342) DBG_FAULT(39)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1343) FAULT(39)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1344)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1345) .org ia64_ivt+0x6400
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1346) /////////////////////////////////////////////////////////////////////////////////////////
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1347) // 0x6400 Entry 40 (size 16 bundles) Reserved
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1348) DBG_FAULT(40)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1349) FAULT(40)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1350)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1351) .org ia64_ivt+0x6500
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1352) /////////////////////////////////////////////////////////////////////////////////////////
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1353) // 0x6500 Entry 41 (size 16 bundles) Reserved
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1354) DBG_FAULT(41)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1355) FAULT(41)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1356)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1357) .org ia64_ivt+0x6600
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1358) /////////////////////////////////////////////////////////////////////////////////////////
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1359) // 0x6600 Entry 42 (size 16 bundles) Reserved
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1360) DBG_FAULT(42)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1361) FAULT(42)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1362)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1363) .org ia64_ivt+0x6700
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1364) /////////////////////////////////////////////////////////////////////////////////////////
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1365) // 0x6700 Entry 43 (size 16 bundles) Reserved
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1366) DBG_FAULT(43)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1367) FAULT(43)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1368)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1369) .org ia64_ivt+0x6800
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1370) /////////////////////////////////////////////////////////////////////////////////////////
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1371) // 0x6800 Entry 44 (size 16 bundles) Reserved
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1372) DBG_FAULT(44)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1373) FAULT(44)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1374)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1375) .org ia64_ivt+0x6900
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1376) /////////////////////////////////////////////////////////////////////////////////////////
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1377) // 0x6900 Entry 45 (size 16 bundles) IA-32 Exception (17,18,29,41,42,43,44,58,60,61,62,72,73,75,76,77)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1378) ENTRY(ia32_exception)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1379) DBG_FAULT(45)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1380) FAULT(45)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1381) END(ia32_exception)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1382)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1383) .org ia64_ivt+0x6a00
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1384) /////////////////////////////////////////////////////////////////////////////////////////
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1385) // 0x6a00 Entry 46 (size 16 bundles) IA-32 Intercept (30,31,59,70,71)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1386) ENTRY(ia32_intercept)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1387) DBG_FAULT(46)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1388) FAULT(46)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1389) END(ia32_intercept)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1390)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1391) .org ia64_ivt+0x6b00
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1392) /////////////////////////////////////////////////////////////////////////////////////////
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1393) // 0x6b00 Entry 47 (size 16 bundles) IA-32 Interrupt (74)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1394) ENTRY(ia32_interrupt)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1395) DBG_FAULT(47)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1396) FAULT(47)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1397) END(ia32_interrupt)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1398)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1399) .org ia64_ivt+0x6c00
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1400) /////////////////////////////////////////////////////////////////////////////////////////
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1401) // 0x6c00 Entry 48 (size 16 bundles) Reserved
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1402) DBG_FAULT(48)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1403) FAULT(48)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1404)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1405) .org ia64_ivt+0x6d00
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1406) /////////////////////////////////////////////////////////////////////////////////////////
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1407) // 0x6d00 Entry 49 (size 16 bundles) Reserved
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1408) DBG_FAULT(49)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1409) FAULT(49)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1410)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1411) .org ia64_ivt+0x6e00
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1412) /////////////////////////////////////////////////////////////////////////////////////////
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1413) // 0x6e00 Entry 50 (size 16 bundles) Reserved
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1414) DBG_FAULT(50)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1415) FAULT(50)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1416)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1417) .org ia64_ivt+0x6f00
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1418) /////////////////////////////////////////////////////////////////////////////////////////
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1419) // 0x6f00 Entry 51 (size 16 bundles) Reserved
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1420) DBG_FAULT(51)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1421) FAULT(51)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1422)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1423) .org ia64_ivt+0x7000
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1424) /////////////////////////////////////////////////////////////////////////////////////////
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1425) // 0x7000 Entry 52 (size 16 bundles) Reserved
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1426) DBG_FAULT(52)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1427) FAULT(52)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1428)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1429) .org ia64_ivt+0x7100
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1430) /////////////////////////////////////////////////////////////////////////////////////////
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1431) // 0x7100 Entry 53 (size 16 bundles) Reserved
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1432) DBG_FAULT(53)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1433) FAULT(53)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1434)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1435) .org ia64_ivt+0x7200
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1436) /////////////////////////////////////////////////////////////////////////////////////////
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1437) // 0x7200 Entry 54 (size 16 bundles) Reserved
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1438) DBG_FAULT(54)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1439) FAULT(54)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1440)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1441) .org ia64_ivt+0x7300
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1442) /////////////////////////////////////////////////////////////////////////////////////////
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1443) // 0x7300 Entry 55 (size 16 bundles) Reserved
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1444) DBG_FAULT(55)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1445) FAULT(55)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1446)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1447) .org ia64_ivt+0x7400
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1448) /////////////////////////////////////////////////////////////////////////////////////////
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1449) // 0x7400 Entry 56 (size 16 bundles) Reserved
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1450) DBG_FAULT(56)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1451) FAULT(56)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1452)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1453) .org ia64_ivt+0x7500
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1454) /////////////////////////////////////////////////////////////////////////////////////////
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1455) // 0x7500 Entry 57 (size 16 bundles) Reserved
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1456) DBG_FAULT(57)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1457) FAULT(57)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1458)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1459) .org ia64_ivt+0x7600
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1460) /////////////////////////////////////////////////////////////////////////////////////////
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1461) // 0x7600 Entry 58 (size 16 bundles) Reserved
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1462) DBG_FAULT(58)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1463) FAULT(58)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1464)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1465) .org ia64_ivt+0x7700
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1466) /////////////////////////////////////////////////////////////////////////////////////////
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1467) // 0x7700 Entry 59 (size 16 bundles) Reserved
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1468) DBG_FAULT(59)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1469) FAULT(59)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1470)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1471) .org ia64_ivt+0x7800
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1472) /////////////////////////////////////////////////////////////////////////////////////////
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1473) // 0x7800 Entry 60 (size 16 bundles) Reserved
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1474) DBG_FAULT(60)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1475) FAULT(60)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1476)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1477) .org ia64_ivt+0x7900
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1478) /////////////////////////////////////////////////////////////////////////////////////////
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1479) // 0x7900 Entry 61 (size 16 bundles) Reserved
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1480) DBG_FAULT(61)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1481) FAULT(61)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1482)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1483) .org ia64_ivt+0x7a00
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1484) /////////////////////////////////////////////////////////////////////////////////////////
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1485) // 0x7a00 Entry 62 (size 16 bundles) Reserved
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1486) DBG_FAULT(62)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1487) FAULT(62)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1488)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1489) .org ia64_ivt+0x7b00
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1490) /////////////////////////////////////////////////////////////////////////////////////////
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1491) // 0x7b00 Entry 63 (size 16 bundles) Reserved
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1492) DBG_FAULT(63)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1493) FAULT(63)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1494)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1495) .org ia64_ivt+0x7c00
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1496) /////////////////////////////////////////////////////////////////////////////////////////
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1497) // 0x7c00 Entry 64 (size 16 bundles) Reserved
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1498) DBG_FAULT(64)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1499) FAULT(64)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1500)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1501) .org ia64_ivt+0x7d00
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1502) /////////////////////////////////////////////////////////////////////////////////////////
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1503) // 0x7d00 Entry 65 (size 16 bundles) Reserved
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1504) DBG_FAULT(65)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1505) FAULT(65)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1506)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1507) .org ia64_ivt+0x7e00
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1508) /////////////////////////////////////////////////////////////////////////////////////////
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1509) // 0x7e00 Entry 66 (size 16 bundles) Reserved
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1510) DBG_FAULT(66)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1511) FAULT(66)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1512)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1513) .org ia64_ivt+0x7f00
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1514) /////////////////////////////////////////////////////////////////////////////////////////
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1515) // 0x7f00 Entry 67 (size 16 bundles) Reserved
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1516) DBG_FAULT(67)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1517) FAULT(67)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1518)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1519) //-----------------------------------------------------------------------------------
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1520) // call ia64_do_page_fault (predicates are in r31, psr.dt may be off, r16 is faulting address)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1521) ENTRY(page_fault)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1522) SSM_PSR_DT_AND_SRLZ_I
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1523) ;;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1524) SAVE_MIN_WITH_COVER
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1525) alloc r15=ar.pfs,0,0,3,0
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1526) MOV_FROM_IFA(out0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1527) MOV_FROM_ISR(out1)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1528) SSM_PSR_IC_AND_DEFAULT_BITS_AND_SRLZ_I(r14, r3)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1529) adds r3=8,r2 // set up second base pointer
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1530) SSM_PSR_I(p15, p15, r14) // restore psr.i
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1531) movl r14=ia64_leave_kernel
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1532) ;;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1533) SAVE_REST
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1534) mov rp=r14
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1535) ;;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1536) adds out2=16,r12 // out2 = pointer to pt_regs
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1537) br.call.sptk.many b6=ia64_do_page_fault // ignore return address
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1538) END(page_fault)
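/*
 * For reference only -- a hedged C-side sketch (not part of this file) of the
 * handler the dispatcher above branches to.  The register moves map
 * out0/out1/out2 onto the first three arguments:
 *
 *	struct pt_regs;
 *	void ia64_do_page_fault(unsigned long address,	// cr.ifa (out0)
 *				unsigned long isr,	// cr.isr (out1)
 *				struct pt_regs *regs);	// sp+16  (out2)
 *
 * The exact prototype lives in the C part of the port; the argument names and
 * types shown here are illustrative assumptions.
 */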
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1539)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1540) ENTRY(non_syscall)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1541) mov ar.rsc=r27 // restore ar.rsc before SAVE_MIN_WITH_COVER
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1542) ;;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1543) SAVE_MIN_WITH_COVER
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1544)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1545) // There is no particular reason for this code to be here, other than that
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1546) // there happens to be space here that would go unused otherwise. If this
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1547) // fault ever gets "unreserved", simply move the following code to a more
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1548) // suitable spot...
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1549)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1550) alloc r14=ar.pfs,0,0,2,0
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1551) MOV_FROM_IIM(out0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1552) add out1=16,sp
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1553) adds r3=8,r2 // set up second base pointer for SAVE_REST
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1554)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1555) SSM_PSR_IC_AND_DEFAULT_BITS_AND_SRLZ_I(r15, r24)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1556) // guarantee that interruption collection is on
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1557) SSM_PSR_I(p15, p15, r15) // restore psr.i
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1558) movl r15=ia64_leave_kernel
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1559) ;;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1560) SAVE_REST
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1561) mov rp=r15
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1562) ;;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1563) br.call.sptk.many b6=ia64_bad_break // avoid WAW on CFM and ignore return addr
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1564) END(non_syscall)
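/*
 * Hedged sketch only: the call above loads cr.iim into out0 and the pt_regs
 * pointer into out1, so the C handler is assumed to look roughly like:
 *
 *	struct pt_regs;
 *	void ia64_bad_break(unsigned long break_num, struct pt_regs *regs);
 *
 * Argument names and types are illustrative, not taken from this file.
 */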
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1565)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1566) ENTRY(__interrupt)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1567) DBG_FAULT(12)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1568) mov r31=pr // prepare to save predicates
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1569) ;;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1570) SAVE_MIN_WITH_COVER // uses r31; defines r2 and r3
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1571) SSM_PSR_IC_AND_DEFAULT_BITS_AND_SRLZ_I(r3, r14)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1572) // ensure everybody knows psr.ic is back on
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1573) adds r3=8,r2 // set up second base pointer for SAVE_REST
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1574) ;;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1575) SAVE_REST
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1576) ;;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1577) MCA_RECOVER_RANGE(interrupt)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1578) alloc r14=ar.pfs,0,0,2,0 // must be first in an insn group
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1579) MOV_FROM_IVR(out0, r8) // pass cr.ivr as first arg
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1580) add out1=16,sp // pass pointer to pt_regs as second arg
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1581) ;;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1582) srlz.d // make sure we see the effect of cr.ivr
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1583) movl r14=ia64_leave_kernel
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1584) ;;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1585) mov rp=r14
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1586) br.call.sptk.many b6=ia64_handle_irq
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1587) END(__interrupt)
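/*
 * Hedged sketch only: out0 receives cr.ivr (the interrupt vector) and out1
 * the pt_regs pointer, so the C entry point is assumed to have roughly this
 * shape:
 *
 *	struct pt_regs;
 *	void ia64_handle_irq(unsigned long vector, struct pt_regs *regs);
 *
 * The real prototype may use a dedicated vector typedef; the types shown
 * here are an assumption.
 */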
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1588)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1589) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1590) * There is no particular reason for this code to be here, other than that
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1591) * there happens to be space here that would go unused otherwise. If this
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1592) * fault ever gets "unreserved", simply move the following code to a more
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1593) * suitable spot...
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1594) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1595)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1596) ENTRY(dispatch_unaligned_handler)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1597) SAVE_MIN_WITH_COVER
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1598) ;;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1599) alloc r14=ar.pfs,0,0,2,0 // now it's safe (must be first in insn group!)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1600) MOV_FROM_IFA(out0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1601) adds out1=16,sp
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1602)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1603) SSM_PSR_IC_AND_DEFAULT_BITS_AND_SRLZ_I(r3, r24)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1604) // guarantee that interruption collection is on
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1605) SSM_PSR_I(p15, p15, r3) // restore psr.i
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1606) adds r3=8,r2 // set up second base pointer
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1607) ;;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1608) SAVE_REST
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1609) movl r14=ia64_leave_kernel
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1610) ;;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1611) mov rp=r14
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1612) br.sptk.many ia64_prepare_handle_unaligned
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1613) END(dispatch_unaligned_handler)
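/*
 * Hedged sketch only: the branch above goes through a preparation stub that
 * is expected to end up in a C handler taking the faulting address and the
 * pt_regs pointer (out0 = cr.ifa, out1 = sp+16), roughly:
 *
 *	struct pt_regs;
 *	void ia64_handle_unaligned(unsigned long ifa, struct pt_regs *regs);
 *
 * Names and types are illustrative assumptions, not taken from this file.
 */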
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1614)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1615) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1616) * There is no particular reason for this code to be here, other than that
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1617) * there happens to be space here that would go unused otherwise. If this
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1618) * fault ever gets "unreserved", simply move the following code to a more
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1619) * suitable spot...
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1620) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1621)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1622) ENTRY(dispatch_to_fault_handler)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1623) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1624) * Input:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1625) * psr.ic: off
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1626) * r19: fault vector number (e.g., 24 for General Exception)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1627) * r31: contains saved predicates (pr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1628) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1629) SAVE_MIN_WITH_COVER_R19
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1630) alloc r14=ar.pfs,0,0,5,0
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1631) MOV_FROM_ISR(out1)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1632) MOV_FROM_IFA(out2)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1633) MOV_FROM_IIM(out3)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1634) MOV_FROM_ITIR(out4)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1635) ;;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1636) SSM_PSR_IC_AND_DEFAULT_BITS_AND_SRLZ_I(r3, out0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1637) // guarantee that interruption collection is on
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1638) mov out0=r15
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1639) ;;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1640) SSM_PSR_I(p15, p15, r3) // restore psr.i
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1641) adds r3=8,r2 // set up second base pointer for SAVE_REST
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1642) ;;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1643) SAVE_REST
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1644) movl r14=ia64_leave_kernel
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1645) ;;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1646) mov rp=r14
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1647) br.call.sptk.many b6=ia64_fault
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1648) END(dispatch_to_fault_handler)
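/*
 * Hedged sketch only: out0..out4 above carry the vector number and four
 * control registers, so ia64_fault is assumed to begin with five arguments
 * in this order (any further arguments are not set up here):
 *
 *	void ia64_fault(unsigned long vector,	// r15     (out0)
 *			unsigned long isr,	// cr.isr  (out1)
 *			unsigned long ifa,	// cr.ifa  (out2)
 *			unsigned long iim,	// cr.iim  (out3)
 *			unsigned long itir);	// cr.itir (out4)
 *
 * Argument names are illustrative.
 */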
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1649)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1650) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1651) * Squatting in this space ...
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1652) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1653) * This special case dispatcher for illegal operation faults allows preserved
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1654) * registers to be modified through a callback function (asm only) that is handed
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1655) * back from the fault handler in r8. Up to three arguments can be passed to the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1656) * callback function by returning an aggregate with the callback as its first
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1657) * element, followed by the arguments.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1658) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1659) ENTRY(dispatch_illegal_op_fault)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1660) .prologue
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1661) .body
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1662) SAVE_MIN_WITH_COVER
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1663) SSM_PSR_IC_AND_DEFAULT_BITS_AND_SRLZ_I(r3, r24)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1664) // guarantee that interruption collection is on
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1665) ;;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1666) SSM_PSR_I(p15, p15, r3) // restore psr.i
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1667) adds r3=8,r2 // set up second base pointer for SAVE_REST
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1668) ;;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1669) alloc r14=ar.pfs,0,0,1,0 // must be first in insn group
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1670) mov out0=ar.ec
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1671) ;;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1672) SAVE_REST
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1673) PT_REGS_UNWIND_INFO(0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1674) ;;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1675) br.call.sptk.many rp=ia64_illegal_op_fault
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1676) .ret0: ;;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1677) alloc r14=ar.pfs,0,0,3,0 // must be first in insn group
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1678) mov out0=r9
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1679) mov out1=r10
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1680) mov out2=r11
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1681) movl r15=ia64_leave_kernel
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1682) ;;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1683) mov rp=r15
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1684) mov b6=r8
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1685) ;;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1686) cmp.ne p6,p0=0,r8
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1687) (p6) br.call.dpnt.many b6=b6 // call returns to ia64_leave_kernel
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1688) br.sptk.many ia64_leave_kernel
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1689) END(dispatch_illegal_op_fault)
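/*
 * Hedged sketch only: the "aggregate with the callback as its first element"
 * described above is assumed to be a small C struct returned by value, which
 * the ia64 calling convention places in r8-r11 -- exactly the registers the
 * dispatcher copies into b6 and out0-out2 before the optional callback:
 *
 *	struct illegal_op_return {		// illustrative name
 *		unsigned long fkt;		// callback address: r8 -> b6
 *		unsigned long arg1, arg2, arg3;	// r9, r10, r11 -> out0..out2
 *	};
 *	struct illegal_op_return ia64_illegal_op_fault(unsigned long ec, ...);
 *
 * A zero callback (r8 == 0) simply falls through to ia64_leave_kernel.
 */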