// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Copyright (C) 2009 Sunplus Core Technology Co., Ltd.
 *  Lennox Wu <lennox.wu@sunplusct.com>
 *  Chen Liqin <liqin.chen@sunplusct.com>
 * Copyright (C) 2012 Regents of the University of California
 */

#include <linux/mm.h>
#include <linux/kernel.h>
#include <linux/interrupt.h>
#include <linux/perf_event.h>
#include <linux/signal.h>
#include <linux/uaccess.h>

#include <asm/ptrace.h>
#include <asm/tlbflush.h>

#include "../kernel/head.h"

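/*
 * Handle a fault for which there is no usable user context: try the
 * exception fixup table first, otherwise oops and kill the task.
 */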
static inline void no_context(struct pt_regs *regs, unsigned long addr)
{
	/* Are we prepared to handle this kernel fault? */
	if (fixup_exception(regs))
		return;

	/*
	 * Oops. The kernel tried to access some bad page. We'll have to
	 * terminate things with extreme prejudice.
	 */
	bust_spinlocks(1);
	pr_alert("Unable to handle kernel %s at virtual address " REG_FMT "\n",
		(addr < PAGE_SIZE) ? "NULL pointer dereference" :
		"paging request", addr);
	die(regs, "Oops");
	do_exit(SIGKILL);
}

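/*
 * Handle the error codes returned by handle_mm_fault(): invoke the OOM
 * killer for VM_FAULT_OOM and raise SIGBUS for VM_FAULT_SIGBUS, falling
 * back to no_context() for faults taken in kernel mode.
 */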
static inline void mm_fault_error(struct pt_regs *regs, unsigned long addr, vm_fault_t fault)
{
	if (fault & VM_FAULT_OOM) {
		/*
		 * We ran out of memory, call the OOM killer, and return to userspace
		 * (which will retry the fault, or kill us if we got oom-killed).
		 */
		if (!user_mode(regs)) {
			no_context(regs, addr);
			return;
		}
		pagefault_out_of_memory();
		return;
	} else if (fault & VM_FAULT_SIGBUS) {
		/* Kernel mode? Handle exceptions or die */
		if (!user_mode(regs)) {
			no_context(regs, addr);
			return;
		}
		do_trap(regs, SIGBUS, BUS_ADRERR, addr);
		return;
	}
	BUG();
}

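/*
 * The access fell outside any VMA, or the permission check failed.
 * Called with the mmap lock held for read; drop it, then deliver SIGSEGV
 * for user mode or fall back to no_context() for kernel mode.
 */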
static inline void bad_area(struct pt_regs *regs, struct mm_struct *mm, int code, unsigned long addr)
{
	/*
	 * Something tried to access memory that isn't in our memory map.
	 * Fix it, but check if it's kernel or user first.
	 */
	mmap_read_unlock(mm);
	/* User mode accesses just cause a SIGSEGV */
	if (user_mode(regs)) {
		do_trap(regs, SIGSEGV, code, addr);
		return;
	}

	no_context(regs, addr);
}

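/*
 * Handle a fault in the vmalloc/module area by synchronizing this task's
 * top-level page table with the kernel reference page table (init_mm.pgd).
 */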
static inline void vmalloc_fault(struct pt_regs *regs, int code, unsigned long addr)
{
	pgd_t *pgd, *pgd_k;
	pud_t *pud, *pud_k;
	p4d_t *p4d, *p4d_k;
	pmd_t *pmd, *pmd_k;
	pte_t *pte_k;
	int index;
	unsigned long pfn;

	/* User mode accesses just cause a SIGSEGV */
	if (user_mode(regs))
		return do_trap(regs, SIGSEGV, code, addr);

	/*
	 * Synchronize this task's top level page-table
	 * with the 'reference' page table.
	 *
	 * Do _not_ use "tsk->active_mm->pgd" here.
	 * We might be inside an interrupt in the middle
	 * of a task switch.
	 */
	index = pgd_index(addr);
	pfn = csr_read(CSR_SATP) & SATP_PPN;
	pgd = (pgd_t *)pfn_to_virt(pfn) + index;
	pgd_k = init_mm.pgd + index;

	if (!pgd_present(*pgd_k)) {
		no_context(regs, addr);
		return;
	}
	set_pgd(pgd, *pgd_k);

	p4d = p4d_offset(pgd, addr);
	p4d_k = p4d_offset(pgd_k, addr);
	if (!p4d_present(*p4d_k)) {
		no_context(regs, addr);
		return;
	}

	pud = pud_offset(p4d, addr);
	pud_k = pud_offset(p4d_k, addr);
	if (!pud_present(*pud_k)) {
		no_context(regs, addr);
		return;
	}

	/*
	 * Since the vmalloc area is global, it is unnecessary
	 * to copy individual PTEs
	 */
	pmd = pmd_offset(pud, addr);
	pmd_k = pmd_offset(pud_k, addr);
	if (!pmd_present(*pmd_k)) {
		no_context(regs, addr);
		return;
	}
	set_pmd(pmd, *pmd_k);

	/*
	 * Make sure the actual PTE exists as well to
	 * catch kernel vmalloc-area accesses to non-mapped
	 * addresses. If we don't do this, this will just
	 * silently loop forever.
	 */
	pte_k = pte_offset_kernel(pmd_k, addr);
	if (!pte_present(*pte_k)) {
		no_context(regs, addr);
		return;
	}

	/*
	 * The kernel assumes that TLBs don't cache invalid
	 * entries, but in RISC-V, SFENCE.VMA specifies an
	 * ordering constraint, not a cache flush; it is
	 * necessary even after writing invalid entries.
	 */
	local_flush_tlb_page(addr);
}

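/*
 * Check whether the faulting access is permitted by the VMA's protection
 * flags: execute for instruction faults, read for load faults and write
 * for store faults.
 */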
static inline bool access_error(unsigned long cause, struct vm_area_struct *vma)
{
	switch (cause) {
	case EXC_INST_PAGE_FAULT:
		if (!(vma->vm_flags & VM_EXEC)) {
			return true;
		}
		break;
	case EXC_LOAD_PAGE_FAULT:
		if (!(vma->vm_flags & VM_READ)) {
			return true;
		}
		break;
	case EXC_STORE_PAGE_FAULT:
		if (!(vma->vm_flags & VM_WRITE)) {
			return true;
		}
		break;
	default:
		panic("%s: unhandled cause %lu", __func__, cause);
	}
	return false;
}

/*
 * This routine handles page faults.  It determines the address and the
 * problem, and then passes it off to one of the appropriate routines.
 */
asmlinkage void do_page_fault(struct pt_regs *regs)
{
	struct task_struct *tsk;
	struct vm_area_struct *vma;
	struct mm_struct *mm;
	unsigned long addr, cause;
	unsigned int flags = FAULT_FLAG_DEFAULT;
	int code = SEGV_MAPERR;
	vm_fault_t fault;

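	/* regs->cause and regs->badaddr hold scause and stval, saved by the exception entry code. */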
	cause = regs->cause;
	addr = regs->badaddr;

	tsk = current;
	mm = tsk->mm;

	/*
	 * Fault-in kernel-space virtual memory on-demand.
	 * The 'reference' page table is init_mm.pgd.
	 *
	 * NOTE! We MUST NOT take any locks for this case. We may
	 * be in an interrupt or a critical region, and should
	 * only copy the information from the master page table,
	 * nothing more.
	 */
	if (unlikely((addr >= VMALLOC_START) && (addr <= VMALLOC_END))) {
		vmalloc_fault(regs, code, addr);
		return;
	}

	/* Enable interrupts if they were enabled in the parent context. */
	if (likely(regs->status & SR_PIE))
		local_irq_enable();

	/*
	 * If we're in an interrupt, have no user context, or are running
	 * in an atomic region, then we must not take the fault.
	 */
	if (unlikely(faulthandler_disabled() || !mm)) {
		no_context(regs, addr);
		return;
	}

	if (user_mode(regs))
		flags |= FAULT_FLAG_USER;

	perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS, 1, regs, addr);

	if (cause == EXC_STORE_PAGE_FAULT)
		flags |= FAULT_FLAG_WRITE;
	else if (cause == EXC_INST_PAGE_FAULT)
		flags |= FAULT_FLAG_INSTRUCTION;
retry:
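	/* Find the VMA that covers the faulting address under the mmap read lock. */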
	mmap_read_lock(mm);
	vma = find_vma(mm, addr);
	if (unlikely(!vma)) {
		bad_area(regs, mm, code, addr);
		return;
	}
	if (likely(vma->vm_start <= addr))
		goto good_area;
	if (unlikely(!(vma->vm_flags & VM_GROWSDOWN))) {
		bad_area(regs, mm, code, addr);
		return;
	}
	if (unlikely(expand_stack(vma, addr))) {
		bad_area(regs, mm, code, addr);
		return;
	}

	/*
	 * Ok, we have a good vm_area for this memory access, so
	 * we can handle it.
	 */
good_area:
	code = SEGV_ACCERR;

	if (unlikely(access_error(cause, vma))) {
		bad_area(regs, mm, code, addr);
		return;
	}

	/*
	 * If for any reason at all we could not handle the fault,
	 * make sure we exit gracefully rather than endlessly redo
	 * the fault.
	 */
	fault = handle_mm_fault(vma, addr, flags, regs);

	/*
	 * If we need to retry but a fatal signal is pending, handle the
	 * signal first. We do not need to release the mmap_lock because it
	 * would already be released in __lock_page_or_retry in mm/filemap.c.
	 */
	if (fault_signal_pending(fault, regs))
		return;

	if (unlikely((fault & VM_FAULT_RETRY) && (flags & FAULT_FLAG_ALLOW_RETRY))) {
		flags |= FAULT_FLAG_TRIED;

		/*
		 * No need to mmap_read_unlock(mm) as we would
		 * have already released it in __lock_page_or_retry
		 * in mm/filemap.c.
		 */
		goto retry;
	}

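	/* The fault was either handled or is a hard error; drop the mmap lock and report. */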
	mmap_read_unlock(mm);

	if (unlikely(fault & VM_FAULT_ERROR)) {
		mm_fault_error(regs, addr, fault);
		return;
	}
	return;
}