Orange Pi 5 kernel

Deprecated Linux kernel 5.10.110 for OrangePi 5/5B/5+ boards

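The listing below is the OpenRISC page-fault handler, arch/openrisc/mm/fault.c, as carried in this 5.10.110 tree.
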
// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * OpenRISC fault.c
 *
 * Linux architectural port borrowing liberally from similar works of
 * others.  All original copyrights apply as per the original source
 * declaration.
 *
 * Modifications for the OpenRISC architecture:
 * Copyright (C) 2003 Matjaz Breskvar <phoenix@bsemi.com>
 * Copyright (C) 2010-2011 Jonas Bonn <jonas@southpole.se>
 */

#include <linux/mm.h>
#include <linux/interrupt.h>
#include <linux/extable.h>
#include <linux/sched/signal.h>
#include <linux/perf_event.h>

#include <linux/uaccess.h>
#include <asm/siginfo.h>
#include <asm/signal.h>

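/*
 * TLB_OFFSET() maps a virtual address to one of the NUM_TLB_ENTRIES
 * TLB sets: (add >> PAGE_SHIFT) & (NUM_TLB_ENTRIES - 1). Neither macro
 * is referenced in this file; presumably they are kept around for the
 * TLB-miss handlers.
 */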
#define NUM_TLB_ENTRIES 64
#define TLB_OFFSET(add) (((add) >> PAGE_SHIFT) & (NUM_TLB_ENTRIES-1))

unsigned long pte_misses;	/* updated by do_page_fault() */
unsigned long pte_errors;	/* updated by do_page_fault() */

/* __PHX__ :: - check the vmalloc_fault in do_page_fault()
 *            - also look into include/asm-or32/mmu_context.h
 */
volatile pgd_t *current_pgd[NR_CPUS];

extern void die(char *, struct pt_regs *, long);

/*
 * This routine handles page faults.  It determines the address
 * and the problem, and then passes the fault off to one of the
 * appropriate routines.
 *
 * On a bad access it signals the offending task (or, for an
 * unrecoverable kernel-mode access, dies); it does not return a value.
 */

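/*
 * 'vector' is the exception vector the fault arrived through: on
 * OpenRISC, 0x300 is the data page fault vector and 0x400 the
 * instruction page fault vector. 'write_acc' is nonzero when the
 * faulting access was a write.
 */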
asmlinkage void do_page_fault(struct pt_regs *regs, unsigned long address,
			      unsigned long vector, int write_acc)
{
	struct task_struct *tsk;
	struct mm_struct *mm;
	struct vm_area_struct *vma;
	int si_code;
	vm_fault_t fault;
	unsigned int flags = FAULT_FLAG_DEFAULT;

	tsk = current;

	/*
	 * We fault-in kernel-space virtual memory on-demand. The
	 * 'reference' page table is init_mm.pgd.
	 *
	 * NOTE! We MUST NOT take any locks for this case. We may
	 * be in an interrupt or a critical region, and should
	 * only copy the information from the master page table,
	 * nothing more.
	 *
	 * NOTE2: This is done so that, when updating the vmalloc
	 * mappings, we don't have to walk all processes' pgdirs and
	 * add the high mappings all at once. Instead we do it as they
	 * are used. However, vmalloc'ed page entries have the PAGE_GLOBAL
	 * bit set, so sometimes the TLB can use a lingering entry.
	 *
	 * This verifies that the fault happens in kernel space
	 * and that the fault was not a protection error.
	 */

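	/*
	 * (0x300/0x400 are the protection-fault vectors; plain
	 * translation misses arrive through the TLB-miss handlers, and
	 * only those are resolved as vmalloc faults below.)
	 */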
	if (address >= VMALLOC_START &&
	    (vector != 0x300 && vector != 0x400) &&
	    !user_mode(regs))
		goto vmalloc_fault;

	/* If exceptions were enabled, we can reenable them here */
	if (user_mode(regs)) {
		/* Exception was in userspace: reenable interrupts */
		local_irq_enable();
		flags |= FAULT_FLAG_USER;
	} else {
		/* If the exception was in a syscall, then IRQs may have
		 * been enabled or disabled.  If they were enabled,
		 * reenable them.
		 */
		if (regs->sr & (SPR_SR_IEE | SPR_SR_TEE))
			local_irq_enable();
	}

	mm = tsk->mm;
	si_code = SEGV_MAPERR;

	/*
	 * If we're in an interrupt or have no user
	 * context, we must not take the fault..
	 */

	if (in_interrupt() || !mm)
		goto no_context;

	perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS, 1, regs, address);

retry:
	mmap_read_lock(mm);
	vma = find_vma(mm, address);

	if (!vma)
		goto bad_area;

	if (vma->vm_start <= address)
		goto good_area;

	if (!(vma->vm_flags & VM_GROWSDOWN))
		goto bad_area;

	if (user_mode(regs)) {
		/*
		 * Accessing the stack below usp is always a bug.
		 * We get page-aligned addresses, so we can only check
		 * that we're within a page of usp, but that might be
		 * enough to catch brutal errors at least.
		 */
		if (address + PAGE_SIZE < regs->sp)
			goto bad_area;
	}
	if (expand_stack(vma, address))
		goto bad_area;

	/*
	 * Ok, we have a good vm_area for this memory access, so
	 * we can handle it..
	 */

good_area:
	si_code = SEGV_ACCERR;

	/* first do some preliminary protection checks */

	if (write_acc) {
		if (!(vma->vm_flags & VM_WRITE))
			goto bad_area;
		flags |= FAULT_FLAG_WRITE;
	} else {
		/* not present */
		if (!(vma->vm_flags & (VM_READ | VM_EXEC)))
			goto bad_area;
	}

	/* are we trying to execute a non-executable area? */
	if ((vector == 0x400) && !(vma->vm_page_prot.pgprot & _PAGE_EXEC))
		goto bad_area;

	/*
	 * If for any reason at all we couldn't handle the fault,
	 * make sure we exit gracefully rather than endlessly redo
	 * the fault.
	 */

	fault = handle_mm_fault(vma, address, flags, regs);

	if (fault_signal_pending(fault, regs))
		return;

	if (unlikely(fault & VM_FAULT_ERROR)) {
		if (fault & VM_FAULT_OOM)
			goto out_of_memory;
		else if (fault & VM_FAULT_SIGSEGV)
			goto bad_area;
		else if (fault & VM_FAULT_SIGBUS)
			goto do_sigbus;
		BUG();
	}

	if (flags & FAULT_FLAG_ALLOW_RETRY) {
		/* RGD: modeled on Cris */
		if (fault & VM_FAULT_RETRY) {
			flags |= FAULT_FLAG_TRIED;

			/* No need to mmap_read_unlock(mm) as we would
			 * have already released it in __lock_page_or_retry
			 * in mm/filemap.c.
			 */

			goto retry;
		}
	}

	mmap_read_unlock(mm);
	return;

	/*
	 * Something tried to access memory that isn't in our memory map..
	 * Fix it, but check if it's kernel or user first..
	 */

bad_area:
	mmap_read_unlock(mm);

bad_area_nosemaphore:

	/* User mode accesses just cause a SIGSEGV */

	if (user_mode(regs)) {
		force_sig_fault(SIGSEGV, si_code, (void __user *)address);
		return;
	}

no_context:

	/* Are we prepared to handle this kernel fault?
	 *
	 * (The kernel has valid exception-points in the source
	 *  when it accesses user-memory. When it fails in one
	 *  of those points, we find it in a table and do a jump
	 *  to some fixup code that loads an appropriate error
	 *  code.)
	 */

	{
		const struct exception_table_entry *entry;

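		/* Presumably a debug marker: l.nop with an immediate
		 * executes as a plain nop on hardware, and simulators can
		 * key off the immediate value. */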
		__asm__ __volatile__("l.nop 42");

		entry = search_exception_tables(regs->pc);
		if (entry != NULL) {
			/* Adjust the instruction pointer in the stackframe */
			regs->pc = entry->fixup;
			return;
		}
	}

	/*
	 * Oops. The kernel tried to access some bad page. We'll have to
	 * terminate things with extreme prejudice.
	 */

	if ((unsigned long)(address) < PAGE_SIZE)
		printk(KERN_ALERT
		       "Unable to handle kernel NULL pointer dereference");
	else
		printk(KERN_ALERT "Unable to handle kernel access");
	printk(KERN_CONT " at virtual address 0x%08lx\n", address);

	die("Oops", regs, write_acc);

	do_exit(SIGKILL);

	/*
	 * We ran out of memory, or some other thing happened to us that made
	 * us unable to handle the page fault gracefully.
	 */

out_of_memory:
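	/* More marker nops; under or1ksim-style simulators, l.nop 1 is
	 * the exit hook, which would presumably halt a simulation run
	 * that hits the OOM path. */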
	__asm__ __volatile__("l.nop 42");
	__asm__ __volatile__("l.nop 1");

	mmap_read_unlock(mm);
	if (!user_mode(regs))
		goto no_context;
	pagefault_out_of_memory();
	return;

do_sigbus:
	mmap_read_unlock(mm);

	/*
	 * Send a sigbus, regardless of whether we were in kernel
	 * or user mode.
	 */
	force_sig_fault(SIGBUS, BUS_ADRERR, (void __user *)address);

	/* Kernel mode? Handle exceptions or die */
	if (!user_mode(regs))
		goto no_context;
	return;

vmalloc_fault:
	{
		/*
		 * Synchronize this task's top level page-table
		 * with the 'reference' page table.
		 *
		 * Use current_pgd instead of tsk->active_mm->pgd,
		 * since the latter might be unavailable if this
		 * code is executed from an unluckily timed irq
		 * (like inside schedule() between switch_mm and
		 *  switch_to...).
		 */

		int offset = pgd_index(address);
		pgd_t *pgd, *pgd_k;
		p4d_t *p4d, *p4d_k;
		pud_t *pud, *pud_k;
		pmd_t *pmd, *pmd_k;
		pte_t *pte_k;

/*
		phx_warn("do_page_fault(): vmalloc_fault will not work "
			 "until current_pgd is assigned a proper value somewhere;\n"
			 "anyhow we don't need this at the moment\n");

		phx_mmu("vmalloc_fault");
*/
		pgd = (pgd_t *)current_pgd[smp_processor_id()] + offset;
		pgd_k = init_mm.pgd + offset;

		/* Since we're two-level, we don't need to do both
		 * set_pgd and set_pmd (they do the same thing). If
		 * we go three-level at some point, do the right thing
		 * with pgd_present and set_pgd here.
		 *
		 * Also, since the vmalloc area is global, we don't
		 * need to copy individual PTEs; it is enough to
		 * copy the pgd pointer into the pte page of the
		 * root task. If that is there, we'll find our pte if
		 * it exists.
		 */

		p4d = p4d_offset(pgd, address);
		p4d_k = p4d_offset(pgd_k, address);
		if (!p4d_present(*p4d_k))
			goto no_context;

		pud = pud_offset(p4d, address);
		pud_k = pud_offset(p4d_k, address);
		if (!pud_present(*pud_k))
			goto no_context;

		pmd = pmd_offset(pud, address);
		pmd_k = pmd_offset(pud_k, address);

		if (!pmd_present(*pmd_k))
			goto bad_area_nosemaphore;

		set_pmd(pmd, *pmd_k);

		/* Make sure the actual PTE exists as well, to
		 * catch kernel vmalloc-area accesses to non-mapped
		 * addresses. If we don't do this, the fault will just
		 * silently loop forever.
		 */

		pte_k = pte_offset_kernel(pmd_k, address);
		if (!pte_present(*pte_k))
			goto no_context;

		return;
	}
}