Orange Pi 5 kernel

Deprecated Linux kernel 5.10.110 for Orange Pi 5/5B/5+ boards

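The listing below appears to be arch/ia64/mm/fault.c, the ia64 MMU page-fault handler carried over from the 5.10 series; in this tree every line of it blames to the single import commit 8f3ce5b39 (kx, 2023-10-28).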
// SPDX-License-Identifier: GPL-2.0
/*
 * MMU fault handling support.
 *
 * Copyright (C) 1998-2002 Hewlett-Packard Co
 *	David Mosberger-Tang <davidm@hpl.hp.com>
 */
#include <linux/sched/signal.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/extable.h>
#include <linux/interrupt.h>
#include <linux/kprobes.h>
#include <linux/kdebug.h>
#include <linux/prefetch.h>
#include <linux/uaccess.h>
#include <linux/perf_event.h>

#include <asm/processor.h>
#include <asm/exception.h>

extern int die(char *, struct pt_regs *, long);

/*
 * Return TRUE if ADDRESS points at a page in the kernel's mapped segment
 * (inside region 5, on ia64) and that page is present.
 */
static int
mapped_kernel_page_is_present (unsigned long address)
{
	pgd_t *pgd;
	p4d_t *p4d;
	pud_t *pud;
	pmd_t *pmd;
	pte_t *ptep, pte;

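	/*
	 * Walk the kernel page tables top down (pgd -> p4d -> pud -> pmd ->
	 * pte) and bail out as soon as any level is missing or bad.
	 */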
	pgd = pgd_offset_k(address);
	if (pgd_none(*pgd) || pgd_bad(*pgd))
		return 0;

	p4d = p4d_offset(pgd, address);
	if (p4d_none(*p4d) || p4d_bad(*p4d))
		return 0;

	pud = pud_offset(p4d, address);
	if (pud_none(*pud) || pud_bad(*pud))
		return 0;

	pmd = pmd_offset(pud, address);
	if (pmd_none(*pmd) || pmd_bad(*pmd))
		return 0;

	ptep = pte_offset_kernel(pmd, address);
	if (!ptep)
		return 0;

	pte = *ptep;
	return pte_present(pte);
}

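/*
 * These must match the bit positions of VM_READ, VM_WRITE and VM_EXEC in
 * <linux/mm.h>; the preprocessor check in ia64_do_page_fault() below breaks
 * the build if they ever drift apart.
 */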
#	define VM_READ_BIT	0
#	define VM_WRITE_BIT	1
#	define VM_EXEC_BIT	2

void __kprobes
ia64_do_page_fault (unsigned long address, unsigned long isr, struct pt_regs *regs)
{
	int signal = SIGSEGV, code = SEGV_MAPERR;
	struct vm_area_struct *vma, *prev_vma;
	struct mm_struct *mm = current->mm;
	unsigned long mask;
	vm_fault_t fault;
	unsigned int flags = FAULT_FLAG_DEFAULT;

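	/*
	 * Build the set of permissions the faulting access needs from the
	 * ISR: the X bit is set for an instruction fetch (VM_EXEC), the W
	 * bit for a write (VM_WRITE).  A plain read leaves the mask empty
	 * and is checked separately against the ISR R bit further down.
	 */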
	mask = ((((isr >> IA64_ISR_X_BIT) & 1UL) << VM_EXEC_BIT)
		| (((isr >> IA64_ISR_W_BIT) & 1UL) << VM_WRITE_BIT));

	/* mmap_lock is performance critical.... */
	prefetchw(&mm->mmap_lock);

	/*
	 * If we're in an interrupt or have no user context, we must not take the fault.
	 */
	if (faulthandler_disabled() || !mm)
		goto no_context;

#ifdef CONFIG_VIRTUAL_MEM_MAP
	/*
	 * If the fault is in region 5 and we are in the kernel, we may already
	 * hold the mmap_lock (the pfn_valid macro is called during mmap). There
	 * are no vmas for region 5 addresses anyway, so skip taking the lock
	 * and go directly to the exception handling code.
	 */

	if ((REGION_NUMBER(address) == 5) && !user_mode(regs))
		goto bad_area_no_up;
#endif

	/*
	 * This is to handle the kprobes on user space access instructions
	 */
	if (kprobe_page_fault(regs, TRAP_BRKPT))
		return;

	if (user_mode(regs))
		flags |= FAULT_FLAG_USER;
	if (mask & VM_WRITE)
		flags |= FAULT_FLAG_WRITE;

	perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS, 1, regs, address);
retry:
	mmap_read_lock(mm);

	vma = find_vma_prev(mm, address, &prev_vma);
	if (!vma && !prev_vma)
		goto bad_area;

	/*
	 * find_vma_prev() returns a vma such that address < vma->vm_end, or NULL.
	 *
	 * We may find no vma at all: the last vm area could be the register
	 * backing store, which needs to expand upwards; in that case vma will
	 * be NULL but prev_vma will be non-NULL.
	 */
	if ((!vma && prev_vma) || (address < vma->vm_start))
		goto check_expansion;

  good_area:
	code = SEGV_ACCERR;

	/* OK, we've got a good vm_area for this memory area.  Check the access permissions: */

#	if (((1 << VM_READ_BIT) != VM_READ || (1 << VM_WRITE_BIT) != VM_WRITE) \
	    || (1 << VM_EXEC_BIT) != VM_EXEC)
#		error File is out of sync with <linux/mm.h>.  Please update.
#	endif

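	/*
	 * A read access shows up with the R bit set in the ISR; it is legal
	 * as long as the vma is readable or writable (a mapping with only
	 * VM_WRITE is still readable at page-table level, since ia64 has no
	 * write-only access right).  Writes and instruction fetches must have
	 * every bit of "mask" set in vm_flags.
	 */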
	if (((isr >> IA64_ISR_R_BIT) & 1UL) && (!(vma->vm_flags & (VM_READ | VM_WRITE))))
		goto bad_area;

	if ((vma->vm_flags & mask) != mask)
		goto bad_area;

	/*
	 * If for any reason at all we couldn't handle the fault, make
	 * sure we exit gracefully rather than endlessly redo the
	 * fault.
	 */
	fault = handle_mm_fault(vma, address, flags, regs);

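	/*
	 * fault_signal_pending() is only true when handle_mm_fault() bailed
	 * out with VM_FAULT_RETRY because a signal is pending; in that case
	 * mmap_lock has already been dropped, so returning without unlocking
	 * is safe.
	 */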
	if (fault_signal_pending(fault, regs))
		return;

	if (unlikely(fault & VM_FAULT_ERROR)) {
		/*
		 * We ran out of memory, or some other thing happened
		 * to us that made us unable to handle the page fault
		 * gracefully.
		 */
		if (fault & VM_FAULT_OOM) {
			goto out_of_memory;
		} else if (fault & VM_FAULT_SIGSEGV) {
			goto bad_area;
		} else if (fault & VM_FAULT_SIGBUS) {
			signal = SIGBUS;
			goto bad_area;
		}
		BUG();
	}

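	/*
	 * handle_mm_fault() may drop mmap_lock and ask to be called again,
	 * e.g. while waiting for page I/O.  Mark that we have tried once and
	 * retry the whole lookup.
	 */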
	if (flags & FAULT_FLAG_ALLOW_RETRY) {
		if (fault & VM_FAULT_RETRY) {
			flags |= FAULT_FLAG_TRIED;

			/* No need to mmap_read_unlock(mm) as we would
			 * have already released it in __lock_page_or_retry
			 * in mm/filemap.c.
			 */

			goto retry;
		}
	}

	mmap_read_unlock(mm);
	return;

  check_expansion:
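	/*
	 * The address is not covered by any vma.  Two cases may still be
	 * valid: the address sits just above prev_vma, a register backing
	 * store that grows upwards (VM_GROWSUP), or in the guard area below
	 * vma, an ordinary stack that grows downwards (VM_GROWSDOWN).
	 */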
	if (!(prev_vma && (prev_vma->vm_flags & VM_GROWSUP) && (address == prev_vma->vm_end))) {
		if (!vma)
			goto bad_area;
		if (!(vma->vm_flags & VM_GROWSDOWN))
			goto bad_area;
		if (REGION_NUMBER(address) != REGION_NUMBER(vma->vm_start)
		    || REGION_OFFSET(address) >= RGN_MAP_LIMIT)
			goto bad_area;
		if (expand_stack(vma, address))
			goto bad_area;
	} else {
		vma = prev_vma;
		if (REGION_NUMBER(address) != REGION_NUMBER(vma->vm_start)
		    || REGION_OFFSET(address) >= RGN_MAP_LIMIT)
			goto bad_area;
		/*
		 * Since the register backing store is accessed sequentially,
		 * we disallow growing it by more than a page at a time.
		 */
		if (address > vma->vm_end + PAGE_SIZE - sizeof(long))
			goto bad_area;
		if (expand_upwards(vma, address))
			goto bad_area;
	}
	goto good_area;

  bad_area:
	mmap_read_unlock(mm);
#ifdef CONFIG_VIRTUAL_MEM_MAP
  bad_area_no_up:
#endif
	if ((isr & IA64_ISR_SP)
	    || ((isr & IA64_ISR_NA) && (isr & IA64_ISR_CODE_MASK) == IA64_ISR_CODE_LFETCH))
	{
		/*
		 * This fault was due to a speculative load or lfetch.fault, set the "ed"
		 * bit in the psr to ensure forward progress.  (Target register will get a
		 * NaT for ld.s, lfetch will be canceled.)
		 */
		ia64_psr(regs)->ed = 1;
		return;
	}
	if (user_mode(regs)) {
		force_sig_fault(signal, code, (void __user *) address,
				0, __ISR_VALID, isr);
		return;
	}

  no_context:
	if ((isr & IA64_ISR_SP)
	    || ((isr & IA64_ISR_NA) && (isr & IA64_ISR_CODE_MASK) == IA64_ISR_CODE_LFETCH))
	{
		/*
		 * This fault was due to a speculative load or lfetch.fault, set the "ed"
		 * bit in the psr to ensure forward progress.  (Target register will get a
		 * NaT for ld.s, lfetch will be canceled.)
		 */
		ia64_psr(regs)->ed = 1;
		return;
	}

	/*
	 * Since we have no vmas for region 5, we might get here even if the address is
	 * valid, due to the VHPT walker inserting a non-present translation that becomes
	 * stale. If that happens, the non-present fault handler already purged the stale
	 * translation, which fixed the problem. So, we check to see if the translation is
	 * valid, and return if it is.
	 */
	if (REGION_NUMBER(address) == 5 && mapped_kernel_page_is_present(address))
		return;

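	/*
	 * ia64_done_with_exception() consults the exception table; a fault in
	 * exception-table-covered code (e.g. the user copy routines) is fixed
	 * up by branching to the registered fixup handler.
	 */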
	if (ia64_done_with_exception(regs))
		return;

	/*
	 * Oops. The kernel tried to access some bad page. We'll have to terminate things
	 * with extreme prejudice.
	 */
	bust_spinlocks(1);

	if (address < PAGE_SIZE)
		printk(KERN_ALERT "Unable to handle kernel NULL pointer dereference (address %016lx)\n", address);
	else
		printk(KERN_ALERT "Unable to handle kernel paging request at "
		       "virtual address %016lx\n", address);
	if (die("Oops", regs, isr))
		regs = NULL;
	bust_spinlocks(0);
	if (regs)
		do_exit(SIGKILL);
	return;

  out_of_memory:
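	/*
	 * Drop the lock before invoking the OOM machinery.  A kernel-mode
	 * fault is treated like any other no_context fault; a user-mode
	 * fault defers to the common pagefault_out_of_memory() handling.
	 */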
	mmap_read_unlock(mm);
	if (!user_mode(regs))
		goto no_context;
	pagefault_out_of_memory();
}