// TODO VM_EXEC flag work-around, cache aliasing
/*
 * arch/xtensa/mm/fault.c
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License. See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 2001 - 2010 Tensilica Inc.
 *
 * Chris Zankel <chris@zankel.net>
 * Joe Taylor <joe@tensilica.com, joetylr@yahoo.com>
 */

#include <linux/mm.h>
#include <linux/extable.h>
#include <linux/hardirq.h>
#include <linux/perf_event.h>
#include <linux/uaccess.h>
#include <asm/mmu_context.h>
#include <asm/cacheflush.h>
#include <asm/hardirq.h>

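/* Per-CPU ASID allocation counter, advanced by the MMU context code in
 * asm/mmu_context.h when an mm needs a fresh address-space ID.
 */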
DEFINE_PER_CPU(unsigned long, asid_cache) = ASID_USER_FIRST;
void bad_page_fault(struct pt_regs*, unsigned long, int);

/*
 * This routine handles page faults. It determines the address,
 * and the problem, and then passes it off to one of the appropriate
 * routines.
 *
 * Note: does not handle Miss and MultiHit.
 */

void do_page_fault(struct pt_regs *regs)
{
	struct vm_area_struct *vma;
	struct mm_struct *mm = current->mm;
	unsigned int exccause = regs->exccause;
	unsigned int address = regs->excvaddr;
	int code;

	int is_write, is_exec;
	vm_fault_t fault;
	unsigned int flags = FAULT_FLAG_DEFAULT;

	code = SEGV_MAPERR;

	/* We fault-in kernel-space virtual memory on-demand. The
	 * 'reference' page table is init_mm.pgd.
	 */
	if (address >= TASK_SIZE && !user_mode(regs))
		goto vmalloc_fault;

	/* If we're in an interrupt or have no user
	 * context, we must not take the fault.
	 */
	if (faulthandler_disabled() || !mm) {
		bad_page_fault(regs, address, SIGSEGV);
		return;
	}

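	/* Decode the exception cause: a store cache-attribute exception is a
	 * write fault, the ITLB causes and the fetch cache-attribute exception
	 * are instruction-fetch faults, and everything else is treated as a
	 * read.
	 */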
	is_write = (exccause == EXCCAUSE_STORE_CACHE_ATTRIBUTE) ? 1 : 0;
	is_exec = (exccause == EXCCAUSE_ITLB_PRIVILEGE ||
		   exccause == EXCCAUSE_ITLB_MISS ||
		   exccause == EXCCAUSE_FETCH_CACHE_ATTRIBUTE) ? 1 : 0;

	pr_debug("[%s:%d:%08x:%d:%08lx:%s%s]\n",
		 current->comm, current->pid,
		 address, exccause, regs->pc,
		 is_write ? "w" : "", is_exec ? "x" : "");

	if (user_mode(regs))
		flags |= FAULT_FLAG_USER;

	perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS, 1, regs, address);

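	/* Retried faults re-enter here after handle_mm_fault() has dropped
	 * the mmap lock and asked for another attempt.
	 */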
retry:
	mmap_read_lock(mm);
	vma = find_vma(mm, address);

	if (!vma)
		goto bad_area;
	if (vma->vm_start <= address)
		goto good_area;
	if (!(vma->vm_flags & VM_GROWSDOWN))
		goto bad_area;
	if (expand_stack(vma, address))
		goto bad_area;

	/* Ok, we have a good vm_area for this memory access, so
	 * we can handle it.
	 */

good_area:
	code = SEGV_ACCERR;

	if (is_write) {
		if (!(vma->vm_flags & VM_WRITE))
			goto bad_area;
		flags |= FAULT_FLAG_WRITE;
	} else if (is_exec) {
		if (!(vma->vm_flags & VM_EXEC))
			goto bad_area;
	} else	/* Allow read even from write-only pages. */
		if (!(vma->vm_flags & (VM_READ | VM_WRITE)))
			goto bad_area;

	/* If for any reason at all we couldn't handle the fault,
	 * make sure we exit gracefully rather than endlessly redo
	 * the fault.
	 */
	fault = handle_mm_fault(vma, address, flags, regs);

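	/* If a signal interrupted the fault, bail out: a user task will take
	 * the signal on return to userspace, while a kernel-mode fault still
	 * has to go through the exception-table fixup path.
	 */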
	if (fault_signal_pending(fault, regs)) {
		if (!user_mode(regs))
			goto bad_page_fault;
		return;
	}

	if (unlikely(fault & VM_FAULT_ERROR)) {
		if (fault & VM_FAULT_OOM)
			goto out_of_memory;
		else if (fault & VM_FAULT_SIGSEGV)
			goto bad_area;
		else if (fault & VM_FAULT_SIGBUS)
			goto do_sigbus;
		BUG();
	}
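	/* handle_mm_fault() was passed regs, so it has already done the
	 * major/minor fault accounting; only the retry case is left to
	 * handle here.
	 */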
	if (flags & FAULT_FLAG_ALLOW_RETRY) {
		if (fault & VM_FAULT_RETRY) {
			flags |= FAULT_FLAG_TRIED;

			/* No need to mmap_read_unlock(mm) as we would
			 * have already released it in __lock_page_or_retry
			 * in mm/filemap.c.
			 */

			goto retry;
		}
	}

	mmap_read_unlock(mm);
	return;

	/* Something tried to access memory that isn't in our memory map.
	 * Fix it, but check if it's kernel or user first.
	 */
bad_area:
	mmap_read_unlock(mm);
	if (user_mode(regs)) {
		current->thread.bad_vaddr = address;
		current->thread.error_code = is_write;
		force_sig_fault(SIGSEGV, code, (void *) address);
		return;
	}
	bad_page_fault(regs, address, SIGSEGV);
	return;


	/* We ran out of memory, or some other thing happened to us that made
	 * us unable to handle the page fault gracefully.
	 */
out_of_memory:
	mmap_read_unlock(mm);
	if (!user_mode(regs))
		bad_page_fault(regs, address, SIGKILL);
	else
		pagefault_out_of_memory();
	return;

do_sigbus:
	mmap_read_unlock(mm);

	/* Send a sigbus, regardless of whether we were in kernel
	 * or user mode.
	 */
	current->thread.bad_vaddr = address;
	force_sig_fault(SIGBUS, BUS_ADRERR, (void *) address);

	/* Kernel mode? Handle exceptions or die */
	if (!user_mode(regs))
		bad_page_fault(regs, address, SIGBUS);
	return;

vmalloc_fault:
	{
		/* Synchronize this task's top level page-table
		 * with the 'reference' page table.
		 */
		struct mm_struct *act_mm = current->active_mm;
		int index = pgd_index(address);
		pgd_t *pgd, *pgd_k;
		p4d_t *p4d, *p4d_k;
		pud_t *pud, *pud_k;
		pmd_t *pmd, *pmd_k;
		pte_t *pte_k;

		if (act_mm == NULL)
			goto bad_page_fault;

		pgd = act_mm->pgd + index;
		pgd_k = init_mm.pgd + index;

		if (!pgd_present(*pgd_k))
			goto bad_page_fault;

		pgd_val(*pgd) = pgd_val(*pgd_k);

		p4d = p4d_offset(pgd, address);
		p4d_k = p4d_offset(pgd_k, address);
		if (!p4d_present(*p4d) || !p4d_present(*p4d_k))
			goto bad_page_fault;

		pud = pud_offset(p4d, address);
		pud_k = pud_offset(p4d_k, address);
		if (!pud_present(*pud) || !pud_present(*pud_k))
			goto bad_page_fault;

		pmd = pmd_offset(pud, address);
		pmd_k = pmd_offset(pud_k, address);
		if (!pmd_present(*pmd) || !pmd_present(*pmd_k))
			goto bad_page_fault;

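		/* The kernel mappings are shared at the pmd level: copy the
		 * pmd entry and make sure the final pte really is present.
		 */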
		pmd_val(*pmd) = pmd_val(*pmd_k);
		pte_k = pte_offset_kernel(pmd_k, address);

		if (!pte_present(*pte_k))
			goto bad_page_fault;
		return;
	}
bad_page_fault:
	bad_page_fault(regs, address, SIGKILL);
	return;
}


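/* Handle a kernel-side fault that the main handler could not resolve:
 * use an exception-table fixup if one exists for the faulting pc,
 * otherwise print an oops and die.
 */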
void
bad_page_fault(struct pt_regs *regs, unsigned long address, int sig)
{
	extern void die(const char*, struct pt_regs*, long);
	const struct exception_table_entry *entry;

	/* Are we prepared to handle this kernel fault? */
	if ((entry = search_exception_tables(regs->pc)) != NULL) {
		pr_debug("%s: Exception at pc=%#010lx (%lx)\n",
			 current->comm, regs->pc, entry->fixup);
		current->thread.bad_uaddr = address;
		regs->pc = entry->fixup;
		return;
	}

	/* Oops. The kernel tried to access some bad page. We'll have to
	 * terminate things with extreme prejudice.
	 */
	pr_alert("Unable to handle kernel paging request at virtual "
		 "address %08lx\n pc = %08lx, ra = %08lx\n",
		 address, regs->pc, regs->areg[0]);
	die("Oops", regs, sig);
	do_exit(sig);
}