/*
 * arch/microblaze/mm/fault.c
 *
 * Copyright (C) 2007 Xilinx, Inc. All rights reserved.
 *
 * Derived from "arch/ppc/mm/fault.c"
 * Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
 *
 * Derived from "arch/i386/mm/fault.c"
 * Copyright (C) 1991, 1992, 1993, 1994 Linus Torvalds
 *
 * Modified by Cort Dougan and Paul Mackerras.
 *
 * This file is subject to the terms and conditions of the GNU General
 * Public License. See the file COPYING in the main directory of this
 * archive for more details.
 *
 */

#include <linux/extable.h>
#include <linux/signal.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/ptrace.h>
#include <linux/mman.h>
#include <linux/mm.h>
#include <linux/interrupt.h>
#include <linux/perf_event.h>

#include <asm/page.h>
#include <asm/mmu.h>
#include <linux/mmu_context.h>
#include <linux/uaccess.h>
#include <asm/exceptions.h>

static unsigned long pte_misses;	/* updated by do_page_fault() */
static unsigned long pte_errors;	/* updated by do_page_fault() */

/*
 * Check whether the instruction at regs->pc is a store using
 * an update addressing form which will update r1 (the stack pointer).
 */
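/*
 * A minimal sketch of the decode, assuming the usual MicroBlaze
 * encoding (opcode in bits [31:26], rD in bits [25:21]).  For the
 * hypothetical instruction word 0xf821fffc ("swi r1, r1, -4"):
 *
 *	(inst >> 26) & 0x3f == 0x3e	swi, a store opcode
 *	(inst >> 21) & 0x1f == 1	rD is r1
 *	 inst & 0xd0000000 == 0xd0000000
 *
 * The 0xd0000000 mask below matches the sb/sh/sw and sbi/shi/swi
 * opcode groups in a single test.
 */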
static int store_updates_sp(struct pt_regs *regs)
{
	unsigned int inst;

	/* reading the instruction may itself fault; treat that as "no" */
	if (get_user(inst, (unsigned int __user *)regs->pc))
		return 0;
	/* check for 1 in the rD field, i.e. the store involves r1 */
	if (((inst >> 21) & 0x1f) != 1)
		return 0;
	/* check for store opcodes */
	if ((inst & 0xd0000000) == 0xd0000000)
		return 1;
	return 0;
}
/*
 * bad_page_fault is called when we have a bad access from the kernel.
 * It is called from do_page_fault below and from some of the procedures
 * in traps.c.
 */
void bad_page_fault(struct pt_regs *regs, unsigned long address, int sig)
{
	const struct exception_table_entry *fixup;
	/* MS: no context */
	/* Are we prepared to handle this fault? */
	fixup = search_exception_tables(regs->pc);
	if (fixup) {
		regs->pc = fixup->fixup;
		return;
	}
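	/*
	 * (Where these entries come from: each user-access instruction
	 * in the uaccess helpers is paired with a fixup address in the
	 * __ex_table section, so a faulting get_user()/put_user()
	 * resumes at code that returns -EFAULT instead of oopsing.)
	 */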

	/* kernel has accessed a bad area */
	die("kernel access of bad area", regs, sig);
}

/*
 * The error_code parameter is ESR for a data fault,
 * 0 for an instruction fault.
 */
void do_page_fault(struct pt_regs *regs, unsigned long address,
		   unsigned long error_code)
{
	struct vm_area_struct *vma;
	struct mm_struct *mm = current->mm;
	int code = SEGV_MAPERR;
	int is_write = error_code & ESR_S;
	vm_fault_t fault;
	unsigned int flags = FAULT_FLAG_DEFAULT;

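	/*
	 * Mirror the MicroBlaze Exception Address/Status registers
	 * (EAR/ESR) into pt_regs so that later consumers (signal
	 * delivery, debuggers) can see what faulted.
	 */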
	regs->ear = address;
	regs->esr = error_code;

	/* A kernel-mode fault on an address above TASK_SIZE is always a bug */
	if (unlikely(kernel_mode(regs) && (address >= TASK_SIZE))) {
		pr_warn("kernel access above TASK_SIZE\n");
		_exception(SIGSEGV, regs, code, address);
	}

	/* for instr TLB miss and instr storage exception ESR_S is undefined */
	if ((error_code & 0x13) == 0x13 || (error_code & 0x11) == 0x11)
		is_write = 0;
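	/*
	 * (Note: the first test above is subsumed by the second, since a
	 * value matching 0x13 also matches 0x11; both are kept to document
	 * the two distinct instruction-side exception causes.)
	 */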

	if (unlikely(faulthandler_disabled() || !mm)) {
		if (kernel_mode(regs))
			goto bad_area_nosemaphore;

		/*
		 * faulthandler_disabled() in user mode is really bad,
		 * as is current->mm == NULL.
		 */
		pr_emerg("Page fault in user mode with faulthandler_disabled(), mm = %p\n",
			 mm);
		pr_emerg("r15 = %lx MSR = %lx\n", regs->r15, regs->msr);
		die("Weird page fault", regs, SIGSEGV);
	}

	if (user_mode(regs))
		flags |= FAULT_FLAG_USER;

	perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS, 1, regs, address);

	/* When running in the kernel we expect faults to occur only to
	 * addresses in user space. All other faults represent errors in the
	 * kernel and should generate an OOPS. Unfortunately, in the case of an
	 * erroneous fault occurring in a code path which already holds mmap_lock
	 * we will deadlock attempting to validate the fault against the
	 * address space. Luckily the kernel only validly references user
	 * space from well-defined areas of code, which are listed in the
	 * exception tables.
	 *
	 * As the vast majority of faults will be valid we will only perform
	 * the source reference check when there is a possibility of a deadlock.
	 * Attempt to lock the address space; if we cannot, then validate the
	 * source. If this is invalid we can skip the address space check,
	 * thus avoiding the deadlock.
	 */
	if (unlikely(!mmap_read_trylock(mm))) {
		if (kernel_mode(regs) && !search_exception_tables(regs->pc))
			goto bad_area_nosemaphore;

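		/*
		 * The retry label lives inside this block on purpose:
		 * the first attempt above used the non-sleeping trylock,
		 * while the retry path (after the lock was dropped in
		 * handle_mm_fault()) must block until the lock is free.
		 */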
retry:
		mmap_read_lock(mm);
	}

	vma = find_vma(mm, address);
	if (unlikely(!vma))
		goto bad_area;

	if (vma->vm_start <= address)
		goto good_area;

	/* The address is below this vma: only a stack may grow down to it */
	if (unlikely(!(vma->vm_flags & VM_GROWSDOWN)))
		goto bad_area;

	/* Only stores are allowed to grow the stack */
	if (unlikely(!is_write))
		goto bad_area;

	/*
	 * N.B. The ABI allows programs to access up to
	 * a few hundred bytes below the stack pointer (TBD).
	 * The kernel signal delivery code writes up to about 1.5kB
	 * below the stack pointer (r1) before decrementing it.
	 * The exec code can write slightly over 640kB to the stack
	 * before setting the user r1. Thus we allow the stack to
	 * expand to 1MB without further checks.
	 */
	if (unlikely(address + 0x100000 < vma->vm_end)) {
		/* get user regs even if this fault is in kernel mode */
		struct pt_regs *uregs = current->thread.regs;

		if (uregs == NULL)
			goto bad_area;

		/*
		 * A user-mode access to an address a long way below
		 * the stack pointer is only valid if the instruction
		 * is one which would update the stack pointer to the
		 * address accessed if the instruction completed,
		 * i.e. either stwu rs,n(r1) or stwux rs,r1,rb
		 * (or the byte, halfword, float or double forms).
		 * (Those are ppc mnemonics, kept from the ppc code this
		 * file derives from; store_updates_sp() above performs
		 * the equivalent MicroBlaze check.)
		 *
		 * If we don't check this then any write to the area
		 * between the last mapped region and the stack will
		 * expand the stack rather than segfaulting.
		 */
		if (address + 2048 < uregs->r1 &&
		    (kernel_mode(regs) || !store_updates_sp(regs)))
			goto bad_area;
	}
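	/*
	 * expand_stack() grows this VM_GROWSDOWN vma downwards until it
	 * covers 'address'.  (In kernels since the mid-2023 stack-expansion
	 * rework the call is instead "vma = expand_stack(mm, address)" and
	 * may drop the mmap lock; this file still uses the older
	 * vma-based form.)
	 */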
	if (expand_stack(vma, address))
		goto bad_area;

good_area:
	code = SEGV_ACCERR;

	/* a write */
	if (unlikely(is_write)) {
		if (unlikely(!(vma->vm_flags & VM_WRITE)))
			goto bad_area;
		flags |= FAULT_FLAG_WRITE;
	/* a read */
	} else {
		/*
		 * Protection fault: the mapping exists but the access was
		 * not permitted.  (The 0x08000000 test mirrors the DSISR
		 * protection bit of the ppc code this file derives from.)
		 */
		if (unlikely(error_code & 0x08000000))
			goto bad_area;
		if (unlikely(!(vma->vm_flags & (VM_READ | VM_EXEC))))
			goto bad_area;
	}

	/*
	 * If for any reason at all we couldn't handle the fault,
	 * make sure we exit gracefully rather than endlessly redo
	 * the fault.
	 */
	fault = handle_mm_fault(vma, address, flags, regs);

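	/*
	 * If a fatal signal arrived while the fault was being handled,
	 * handle_mm_fault() has already dropped the mmap lock for us
	 * (that only happens on the VM_FAULT_RETRY path); just return
	 * and let the signal be delivered.
	 */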
	if (fault_signal_pending(fault, regs))
		return;

	if (unlikely(fault & VM_FAULT_ERROR)) {
		if (fault & VM_FAULT_OOM)
			goto out_of_memory;
		else if (fault & VM_FAULT_SIGSEGV)
			goto bad_area;
		else if (fault & VM_FAULT_SIGBUS)
			goto do_sigbus;
		BUG();
	}

	if (flags & FAULT_FLAG_ALLOW_RETRY) {
		if (fault & VM_FAULT_RETRY) {
			flags |= FAULT_FLAG_TRIED;

			/*
			 * No need to mmap_read_unlock(mm) as we would
			 * have already released it in __lock_page_or_retry
			 * in mm/filemap.c.
			 */

			goto retry;
		}
	}

	mmap_read_unlock(mm);

	/*
	 * keep track of tlb+htab misses that are good addrs but
	 * just need pte's created via handle_mm_fault()
	 * -- Cort
	 */
	pte_misses++;
	return;

bad_area:
	mmap_read_unlock(mm);

bad_area_nosemaphore:
	pte_errors++;

	/* User mode accesses cause a SIGSEGV */
	if (user_mode(regs)) {
		_exception(SIGSEGV, regs, code, address);
		return;
	}

	bad_page_fault(regs, address, SIGSEGV);
	return;

/*
 * We ran out of memory, or some other thing happened to us that made
 * us unable to handle the page fault gracefully.
 */
out_of_memory:
	mmap_read_unlock(mm);
	if (!user_mode(regs))
		bad_page_fault(regs, address, SIGKILL);
	else
		pagefault_out_of_memory();
	return;

do_sigbus:
	mmap_read_unlock(mm);
	if (user_mode(regs)) {
		force_sig_fault(SIGBUS, BUS_ADRERR, (void __user *)address);
		return;
	}
	bad_page_fault(regs, address, SIGBUS);
}