// SPDX-License-Identifier: GPL-2.0-only
/* Page Fault Handling for ARC (TLB Miss / ProtV)
 *
 * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
 */

#include <linux/signal.h>
#include <linux/interrupt.h>
#include <linux/sched/signal.h>
#include <linux/errno.h>
#include <linux/ptrace.h>
#include <linux/uaccess.h>
#include <linux/kdebug.h>
#include <linux/perf_event.h>
#include <linux/mm_types.h>
#include <asm/mmu.h>

/*
 * Handle faults on kernel virtual addresses, which are needed to implement
 * vmalloc/pkmap/fixmap. Refer to asm/processor.h for the System Memory Map.
 *
 * This simply copies the PMD entry (pointer to the 2nd level page table or to
 * a hugepage) from the swapper pgdir to the task pgdir, so the 2nd level
 * table/page is shared rather than duplicated.
 */
noinline static int handle_kernel_vaddr_fault(unsigned long address)
{
	/*
	 * Synchronize this task's top level page-table
	 * with the 'reference' page table.
	 */
	pgd_t *pgd, *pgd_k;
	p4d_t *p4d, *p4d_k;
	pud_t *pud, *pud_k;
	pmd_t *pmd, *pmd_k;

	pgd = pgd_offset_fast(current->active_mm, address);
	pgd_k = pgd_offset_k(address);

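	/*
	 * Walk the task's and the kernel's ('reference', swapper) page tables
	 * in lockstep. If any level is missing in the kernel copy, the address
	 * was never mapped there, so report failure to the caller.
	 */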
	if (!pgd_present(*pgd_k))
		goto bad_area;

	p4d = p4d_offset(pgd, address);
	p4d_k = p4d_offset(pgd_k, address);
	if (!p4d_present(*p4d_k))
		goto bad_area;

	pud = pud_offset(p4d, address);
	pud_k = pud_offset(p4d_k, address);
	if (!pud_present(*pud_k))
		goto bad_area;

	pmd = pmd_offset(pud, address);
	pmd_k = pmd_offset(pud_k, address);
	if (!pmd_present(*pmd_k))
		goto bad_area;

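	/*
	 * Copy the kernel PMD into the task's page table; the 2nd level
	 * table (or hugepage) itself is shared, not duplicated.
	 */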
	set_pmd(pmd, *pmd_k);

	/* XXX: create the TLB entry here */
	return 0;

bad_area:
	return 1;
}

void do_page_fault(unsigned long address, struct pt_regs *regs)
{
	struct vm_area_struct *vma = NULL;
	struct task_struct *tsk = current;
	struct mm_struct *mm = tsk->mm;
	int sig, si_code = SEGV_MAPERR;
	unsigned int write = 0, exec = 0, mask;
	vm_fault_t fault = VM_FAULT_SIGSEGV;	/* handle_mm_fault() output */
	unsigned int flags;			/* handle_mm_fault() input */

	/*
	 * NOTE! We MUST NOT take any locks for this case. We may
	 * be in an interrupt or a critical region, and should
	 * only copy the information from the master page table,
	 * nothing more.
	 */
	if (address >= VMALLOC_START && !user_mode(regs)) {
		if (unlikely(handle_kernel_vaddr_fault(address)))
			goto no_context;
		else
			return;
	}

	/*
	 * If we're in an interrupt or have no user
	 * context, we must not take the fault.
	 */
	if (faulthandler_disabled() || !mm)
		goto no_context;

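	/*
	 * Decode the ARC Exception Cause Register (ECR): a ProtV store cause
	 * means a write (ST/EX) fault; a ProtV instruction-fetch cause means
	 * an execute fault. Everything else is treated as a read.
	 */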
	if (regs->ecr_cause & ECR_C_PROTV_STORE)	/* ST/EX */
		write = 1;
	else if ((regs->ecr_vec == ECR_V_PROTV) &&
		 (regs->ecr_cause == ECR_C_PROTV_INST_FETCH))
		exec = 1;

	flags = FAULT_FLAG_DEFAULT;
	if (user_mode(regs))
		flags |= FAULT_FLAG_USER;
	if (write)
		flags |= FAULT_FLAG_WRITE;

	perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS, 1, regs, address);
retry:
	mmap_read_lock(mm);

	vma = find_vma(mm, address);
	if (!vma)
		goto bad_area;
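	/*
	 * find_vma() returns the first VMA that ends above @address, so the
	 * address may still lie in the gap below it. Only a VM_GROWSDOWN
	 * (stack) VMA may be expanded downwards to cover it.
	 */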
	if (unlikely(address < vma->vm_start)) {
		if (!(vma->vm_flags & VM_GROWSDOWN) || expand_stack(vma, address))
			goto bad_area;
	}

	/*
	 * vm_area is good, now check permissions for this memory access
	 */
	mask = VM_READ;
	if (write)
		mask = VM_WRITE;
	if (exec)
		mask = VM_EXEC;

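	/*
	 * A mapping exists but does not permit this access type: report
	 * SEGV_ACCERR instead of the default SEGV_MAPERR.
	 */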
	if (!(vma->vm_flags & mask)) {
		si_code = SEGV_ACCERR;
		goto bad_area;
	}

	fault = handle_mm_fault(vma, address, flags, regs);
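	/*
	 * handle_mm_fault() may return VM_FAULT_RETRY with mmap_lock already
	 * dropped, an error such as VM_FAULT_OOM/SIGBUS/SIGSEGV, or success.
	 * Since @regs is passed, perf accounting of major/minor faults is
	 * done inside handle_mm_fault() itself.
	 */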

	/* Quick path to respond to signals */
	if (fault_signal_pending(fault, regs)) {
		if (!user_mode(regs))
			goto no_context;
		return;
	}

	/*
	 * Fault retry nuances: on VM_FAULT_RETRY the mmap_lock has already
	 * been relinquished by core mm, so don't unlock it here; just take
	 * it again and retry with FAULT_FLAG_TRIED set.
	 */
	if (unlikely((fault & VM_FAULT_RETRY) &&
		     (flags & FAULT_FLAG_ALLOW_RETRY))) {
		flags |= FAULT_FLAG_TRIED;
		goto retry;
	}

bad_area:
	mmap_read_unlock(mm);

	/*
	 * Page fault accounting has already been done by handle_mm_fault();
	 * in case of a retry we only land here once. On success just return;
	 * errors (and the bad_area paths, which leave fault preset to
	 * VM_FAULT_SIGSEGV) fall through to signal delivery or kernel fixup.
	 */
	if (likely(!(fault & VM_FAULT_ERROR)))
		/* Normal return path: fault handled gracefully */
		return;

	if (!user_mode(regs))
		goto no_context;

	if (fault & VM_FAULT_OOM) {
		pagefault_out_of_memory();
		return;
	}

	if (fault & VM_FAULT_SIGBUS) {
		sig = SIGBUS;
		si_code = BUS_ADRERR;
	} else {
		sig = SIGSEGV;
	}

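	/*
	 * Record the faulting address for the task and deliver the signal
	 * (SIGSEGV or SIGBUS) with the si_code selected above.
	 */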
	tsk->thread.fault_address = address;
	force_sig_fault(sig, si_code, (void __user *)address);
	return;

no_context:
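	/*
	 * Kernel-mode fault with no way to resolve it: if the faulting
	 * instruction has an exception table entry (e.g. a uaccess helper),
	 * branch to its fixup and continue; otherwise oops.
	 */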
	if (fixup_exception(regs))
		return;

	die("Oops", regs, address);
}