// SPDX-License-Identifier: GPL-2.0-only
/*
 * Memory fault handling for Hexagon
 *
 * Copyright (c) 2010-2011, The Linux Foundation. All rights reserved.
 */

/*
 * Page fault handling for the Hexagon Virtual Machine.
 * Can also be called by a native port emulating the HVM
 * exceptions.
 */

#include <asm/traps.h>
#include <linux/uaccess.h>
#include <linux/mm.h>
#include <linux/sched/signal.h>
#include <linux/signal.h>
#include <linux/extable.h>
#include <linux/hardirq.h>
#include <linux/perf_event.h>

/*
 * Decode of hardware exception sends us to one of several
 * entry points.  At each, we generate canonical arguments
 * for handling by the abstract memory management code.
 */
#define FLT_IFETCH     -1
#define FLT_LOAD        0
#define FLT_STORE       1
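/*
 * These cause codes are passed to do_page_fault() by the exception
 * entry points at the bottom of this file.
 */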

/*
 * Canonical page fault handler
 */
void do_page_fault(unsigned long address, long cause, struct pt_regs *regs)
{
	struct vm_area_struct *vma;
	struct mm_struct *mm = current->mm;
	int si_signo;
	int si_code = SEGV_MAPERR;
	vm_fault_t fault;
	const struct exception_table_entry *fixup;
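	/* FAULT_FLAG_DEFAULT allows retries and killable/interruptible waits */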
	unsigned int flags = FAULT_FLAG_DEFAULT;

	/*
	 * If we're in an interrupt or have no user context,
	 * then we must not take the fault.
	 */
	if (unlikely(in_interrupt() || !mm))
		goto no_context;

	local_irq_enable();

	if (user_mode(regs))
		flags |= FAULT_FLAG_USER;

	perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS, 1, regs, address);
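	/*
	 * Take the mmap read lock and look up the VMA covering the
	 * faulting address; we come back here if the fault handler
	 * dropped the lock and asked us to retry.
	 */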
retry:
	mmap_read_lock(mm);
	vma = find_vma(mm, address);
	if (!vma)
		goto bad_area;

	if (vma->vm_start <= address)
		goto good_area;
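	/*
	 * The address is below the VMA we found; it is valid only if
	 * this is a stack mapping that may grow downward.
	 */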
	if (!(vma->vm_flags & VM_GROWSDOWN))
		goto bad_area;

	if (expand_stack(vma, address))
		goto bad_area;

good_area:
	/* Address space is OK.  Now check access rights. */
	si_code = SEGV_ACCERR;

	switch (cause) {
	case FLT_IFETCH:
		if (!(vma->vm_flags & VM_EXEC))
			goto bad_area;
		break;
	case FLT_LOAD:
		if (!(vma->vm_flags & VM_READ))
			goto bad_area;
		break;
	case FLT_STORE:
		if (!(vma->vm_flags & VM_WRITE))
			goto bad_area;
		flags |= FAULT_FLAG_WRITE;
		break;
	}

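	/*
	 * Hand the fault off to the generic MM code; it may drop the
	 * mmap lock and ask for a retry.
	 */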
	fault = handle_mm_fault(vma, address, flags, regs);

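	/*
	 * If a fatal signal interrupted us, just return; the mmap lock
	 * was already released along with VM_FAULT_RETRY.
	 */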
	if (fault_signal_pending(fault, regs))
		return;

	/* The most common case -- we are done. */
	if (likely(!(fault & VM_FAULT_ERROR))) {
		if (flags & FAULT_FLAG_ALLOW_RETRY) {
			if (fault & VM_FAULT_RETRY) {
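				/*
				 * No need to unlock here: handle_mm_fault()
				 * released the mmap lock before returning
				 * VM_FAULT_RETRY.
				 */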
				flags |= FAULT_FLAG_TRIED;
				goto retry;
			}
		}

		mmap_read_unlock(mm);
		return;
	}

	mmap_read_unlock(mm);

	/* Handle copyin/out exception cases */
	if (!user_mode(regs))
		goto no_context;

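	/* Defer to the generic OOM handler rather than killing this task outright */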
	if (fault & VM_FAULT_OOM) {
		pagefault_out_of_memory();
		return;
	}

	/* User-mode address is in the memory map, but we are
	 * unable to fix up the page fault.
	 */
	if (fault & VM_FAULT_SIGBUS) {
		si_signo = SIGBUS;
		si_code = BUS_ADRERR;
	}
	/* Address is not in the memory map */
	else {
		si_signo = SIGSEGV;
		si_code = SEGV_ACCERR;
	}
	force_sig_fault(si_signo, si_code, (void __user *)address);
	return;

bad_area:
	mmap_read_unlock(mm);

	if (user_mode(regs)) {
		force_sig_fault(SIGSEGV, si_code, (void __user *)address);
		return;
	}
	/* Kernel-mode fault falls through */

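/*
 * Kernel-mode faults end up here: if the faulting instruction has an
 * exception-table fixup (e.g. a uaccess helper), redirect execution
 * to the fixup address instead of taking the kernel down.
 */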
no_context:
	fixup = search_exception_tables(pt_elr(regs));
	if (fixup) {
		pt_set_elr(regs, fixup->fixup);
		return;
	}

	/* Things are looking very, very bad now */
	bust_spinlocks(1);
	printk(KERN_EMERG "Unable to handle kernel paging request at "
		"virtual address 0x%08lx, regs %p\n", address, regs);
	die("Bad Kernel VA", regs, SIGKILL);
}

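/*
 * Exception entry points: pt_badva() returns the faulting virtual
 * address recorded for the exception.
 */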
void read_protection_fault(struct pt_regs *regs)
{
	unsigned long badvadr = pt_badva(regs);

	do_page_fault(badvadr, FLT_LOAD, regs);
}

void write_protection_fault(struct pt_regs *regs)
{
	unsigned long badvadr = pt_badva(regs);

	do_page_fault(badvadr, FLT_STORE, regs);
}

void execute_protection_fault(struct pt_regs *regs)
{
	unsigned long badvadr = pt_badva(regs);

	do_page_fault(badvadr, FLT_IFETCH, regs);
}