// SPDX-License-Identifier: GPL-2.0
// Copyright (C) 2005-2017 Andes Technology Corporation

#include <linux/extable.h>
#include <linux/module.h>
#include <linux/signal.h>
#include <linux/ptrace.h>
#include <linux/mm.h>
#include <linux/init.h>
#include <linux/hardirq.h>
#include <linux/uaccess.h>
#include <linux/perf_event.h>

#include <asm/tlbflush.h>

extern void die(const char *str, struct pt_regs *regs, long err);

/*
 * This is useful to dump out the page tables associated with
 * 'addr' in mm 'mm'.
 */
void show_pte(struct mm_struct *mm, unsigned long addr)
{
	pgd_t *pgd;

	if (!mm)
		mm = &init_mm;

	pr_alert("pgd = %p\n", mm->pgd);
	pgd = pgd_offset(mm, addr);
	pr_alert("[%08lx] *pgd=%08lx", addr, pgd_val(*pgd));

	do {
		p4d_t *p4d;
		pud_t *pud;
		pmd_t *pmd;

		if (pgd_none(*pgd))
			break;

		if (pgd_bad(*pgd)) {
			pr_alert("(bad)");
			break;
		}

		p4d = p4d_offset(pgd, addr);
		pud = pud_offset(p4d, addr);
		pmd = pmd_offset(pud, addr);
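		/*
		 * With a folded pmd (PTRS_PER_PMD == 1) the pmd entry is the
		 * pgd entry already printed above, so only print it when the
		 * pmd level really exists.
		 */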
#if PTRS_PER_PMD != 1
		pr_alert(", *pmd=%08lx", pmd_val(*pmd));
#endif

		if (pmd_none(*pmd))
			break;

		if (pmd_bad(*pmd)) {
			pr_alert("(bad)");
			break;
		}

		if (!IS_ENABLED(CONFIG_HIGHMEM)) {
			pte_t *pte;

			/* We must not map this if we have highmem enabled */
			pte = pte_offset_map(pmd, addr);
			pr_alert(", *pte=%08lx", pte_val(*pte));
			pte_unmap(pte);
		}
	} while (0);

	pr_alert("\n");
}

void do_page_fault(unsigned long entry, unsigned long addr,
		   unsigned int error_code, struct pt_regs *regs)
{
	struct task_struct *tsk;
	struct mm_struct *mm;
	struct vm_area_struct *vma;
	int si_code;
	vm_fault_t fault;
	unsigned int mask = VM_ACCESS_FLAGS;
	unsigned int flags = FAULT_FLAG_DEFAULT;

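	/* Keep only the instruction-fetch flag and the exception-type field. */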
	error_code = error_code & (ITYPE_mskINST | ITYPE_mskETYPE);
	tsk = current;
	mm = tsk->mm;
	si_code = SEGV_MAPERR;

	/*
	 * We fault-in kernel-space virtual memory on-demand. The
	 * 'reference' page table is init_mm.pgd.
	 *
	 * NOTE! We MUST NOT take any locks for this case. We may
	 * be in an interrupt or a critical region, and should
	 * only copy the information from the master page table,
	 * nothing more.
	 */
	if (addr >= TASK_SIZE) {
		if (user_mode(regs))
			goto bad_area_nosemaphore;

		if (addr >= TASK_SIZE && addr < VMALLOC_END
		    && (entry == ENTRY_PTE_NOT_PRESENT))
			goto vmalloc_fault;
		else
			goto no_context;
	}

	/* Send a signal to the task for handling the unaligned access. */
	if (entry == ENTRY_GENERAL_EXCPETION
	    && error_code == ETYPE_ALIGNMENT_CHECK) {
		if (user_mode(regs))
			goto bad_area_nosemaphore;
		else
			goto no_context;
	}

	/*
	 * If we're in an interrupt or have no user
	 * context, we must not take the fault..
	 */
	if (unlikely(faulthandler_disabled() || !mm))
		goto no_context;

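	/* Account this fault as a software perf event. */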
	perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS, 1, regs, addr);

	/*
	 * As per x86, we may deadlock here. However, since the kernel only
	 * validly references user space from well defined areas of the code,
	 * we can bug out early if this is from code which shouldn't.
	 */
	if (unlikely(!mmap_read_trylock(mm))) {
		if (!user_mode(regs) &&
		    !search_exception_tables(instruction_pointer(regs)))
			goto no_context;
retry:
		mmap_read_lock(mm);
	} else {
		/*
		 * The above mmap_read_trylock() might have succeeded, in
		 * which case we'll have missed the might_sleep() from
		 * mmap_read_lock().
		 */
		might_sleep();
		if (IS_ENABLED(CONFIG_DEBUG_VM)) {
			if (!user_mode(regs) &&
			    !search_exception_tables(instruction_pointer(regs)))
				goto no_context;
		}
	}

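	/*
	 * Look up the VMA covering the faulting address; a stack VMA just
	 * above the address may still be grown down to cover it.
	 */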
	vma = find_vma(mm, addr);

	if (unlikely(!vma))
		goto bad_area;

	if (vma->vm_start <= addr)
		goto good_area;

	if (unlikely(!(vma->vm_flags & VM_GROWSDOWN)))
		goto bad_area;

	if (unlikely(expand_stack(vma, addr)))
		goto bad_area;

	/*
	 * Ok, we have a good vm_area for this memory access, so
	 * we can handle it..
	 */

good_area:
	si_code = SEGV_ACCERR;

	/* first do some preliminary protection checks */
	if (entry == ENTRY_PTE_NOT_PRESENT) {
		if (error_code & ITYPE_mskINST)
			mask = VM_EXEC;
		else
			mask = VM_READ | VM_WRITE;
	} else if (entry == ENTRY_TLB_MISC) {
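		/*
		 * A TLB misc exception encodes the violated permission in
		 * the ETYPE field; derive the access rights the VMA must
		 * provide from it.
		 */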
		switch (error_code & ITYPE_mskETYPE) {
		case RD_PROT:
			mask = VM_READ;
			break;
		case WRT_PROT:
			mask = VM_WRITE;
			flags |= FAULT_FLAG_WRITE;
			break;
		case NOEXEC:
			mask = VM_EXEC;
			break;
		case PAGE_MODIFY:
			mask = VM_WRITE;
			flags |= FAULT_FLAG_WRITE;
			break;
		case ACC_BIT:
			BUG();
		default:
			break;
		}
	}
	if (!(vma->vm_flags & mask))
		goto bad_area;

	/*
	 * If for any reason at all we couldn't handle the fault,
	 * make sure we exit gracefully rather than endlessly redo
	 * the fault.
	 */

	fault = handle_mm_fault(vma, addr, flags, regs);

	/*
	 * If we need to retry but a fatal signal is pending, handle the
	 * signal first. We do not need to release the mmap_lock because it
	 * would already be released in __lock_page_or_retry in mm/filemap.c.
	 */
	if (fault_signal_pending(fault, regs)) {
		if (!user_mode(regs))
			goto no_context;
		return;
	}

	if (unlikely(fault & VM_FAULT_ERROR)) {
		if (fault & VM_FAULT_OOM)
			goto out_of_memory;
		else if (fault & VM_FAULT_SIGBUS)
			goto do_sigbus;
		else
			goto bad_area;
	}

	if (flags & FAULT_FLAG_ALLOW_RETRY) {
		if (fault & VM_FAULT_RETRY) {
			flags |= FAULT_FLAG_TRIED;

			/* No need to mmap_read_unlock(mm) as we would
			 * have already released it in __lock_page_or_retry
			 * in mm/filemap.c.
			 */
			goto retry;
		}
	}

	mmap_read_unlock(mm);
	return;

	/*
	 * Something tried to access memory that isn't in our memory map..
	 * Fix it, but check if it's kernel or user first..
	 */
bad_area:
	mmap_read_unlock(mm);

bad_area_nosemaphore:

	/* User mode accesses just cause a SIGSEGV */

	if (user_mode(regs)) {
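		/*
		 * Record the fault details in the thread struct, then
		 * deliver SIGSEGV to the task.
		 */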
		tsk->thread.address = addr;
		tsk->thread.error_code = error_code;
		tsk->thread.trap_no = entry;
		force_sig_fault(SIGSEGV, si_code, (void __user *)addr);
		return;
	}

no_context:

	/* Are we prepared to handle this kernel fault?
	 *
	 * (The kernel has valid exception points in the source
	 * when it accesses user memory. When a fault occurs at
	 * one of those points, we find it in a table and jump
	 * to some fixup code that loads an appropriate error
	 * code.)
	 */

	{
		const struct exception_table_entry *entry;

		entry = search_exception_tables(instruction_pointer(regs));
		if (entry) {
			/* Adjust the instruction pointer in the stackframe */
			instruction_pointer(regs) = entry->fixup;
			return;
		}
	}

	/*
	 * Oops. The kernel tried to access some bad page. We'll have to
	 * terminate things with extreme prejudice.
	 */

	bust_spinlocks(1);
	pr_alert("Unable to handle kernel %s at virtual address %08lx\n",
		 (addr < PAGE_SIZE) ? "NULL pointer dereference" :
		 "paging request", addr);

	show_pte(mm, addr);
	die("Oops", regs, error_code);
	bust_spinlocks(0);
	do_exit(SIGKILL);

	return;

	/*
	 * We ran out of memory, or some other thing happened to us that made
	 * us unable to handle the page fault gracefully.
	 */

out_of_memory:
	mmap_read_unlock(mm);
	if (!user_mode(regs))
		goto no_context;
	pagefault_out_of_memory();
	return;

do_sigbus:
	mmap_read_unlock(mm);

	/* Kernel mode? Handle exceptions or die */
	if (!user_mode(regs))
		goto no_context;

	/*
	 * Send a sigbus
	 */
	tsk->thread.address = addr;
	tsk->thread.error_code = error_code;
	tsk->thread.trap_no = entry;
	force_sig_fault(SIGBUS, BUS_ADRERR, (void __user *)addr);

	return;

vmalloc_fault:
	{
		/*
		 * Synchronize this task's top level page-table
		 * with the 'reference' page table.
		 *
		 * Read the current pgd from the L1 physical page table
		 * base register (NDS32_SR_L1_PPTB) instead of using
		 * tsk->active_mm->pgd, since the latter might be
		 * unavailable if this code is executed from an
		 * inopportunely timed irq (like inside schedule(),
		 * between switch_mm and switch_to...).
		 */

		unsigned int index = pgd_index(addr);
		pgd_t *pgd, *pgd_k;
		p4d_t *p4d, *p4d_k;
		pud_t *pud, *pud_k;
		pmd_t *pmd, *pmd_k;
		pte_t *pte_k;

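		/*
		 * pgd points at the entry for 'addr' in the page table that
		 * is currently in use; pgd_k points at the matching entry
		 * in the kernel reference table (init_mm).
		 */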
		pgd = (pgd_t *) __va(__nds32__mfsr(NDS32_SR_L1_PPTB)) + index;
		pgd_k = init_mm.pgd + index;

		if (!pgd_present(*pgd_k))
			goto no_context;

		p4d = p4d_offset(pgd, addr);
		p4d_k = p4d_offset(pgd_k, addr);
		if (!p4d_present(*p4d_k))
			goto no_context;

		pud = pud_offset(p4d, addr);
		pud_k = pud_offset(p4d_k, addr);
		if (!pud_present(*pud_k))
			goto no_context;

		pmd = pmd_offset(pud, addr);
		pmd_k = pmd_offset(pud_k, addr);
		if (!pmd_present(*pmd_k))
			goto no_context;

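		/* Copy the missing pmd entry from the kernel reference table. */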
		if (!pmd_present(*pmd))
			set_pmd(pmd, *pmd_k);
		else
			BUG_ON(pmd_page(*pmd) != pmd_page(*pmd_k));

		/*
		 * Since the vmalloc area is global, we don't need to copy
		 * individual PTEs; it is enough to copy the pmd entry from
		 * the kernel reference page table. Once that is in place,
		 * we'll find our pte if it exists.
		 */

		/*
		 * Make sure the actual PTE exists as well to catch kernel
		 * vmalloc-area accesses to a non-mapped address. If we
		 * don't do this, this will just silently loop forever.
		 */

		pte_k = pte_offset_kernel(pmd_k, addr);
		if (!pte_present(*pte_k))
			goto no_context;

		return;
	}
}