/*
 * Copyright (C) 2009 Wind River Systems Inc
 * Implemented by fredrik.markstrom@gmail.com and ivarholmqvist@gmail.com
 *
 * based on arch/mips/mm/fault.c which is:
 *
 * Copyright (C) 1995-2000 Ralf Baechle
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License. See the file "COPYING" in the main directory of this archive
 * for more details.
 */

#include <linux/signal.h>
#include <linux/sched.h>
#include <linux/sched/debug.h>
#include <linux/interrupt.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/ptrace.h>
#include <linux/mman.h>
#include <linux/mm.h>
#include <linux/extable.h>
#include <linux/uaccess.h>
#include <linux/perf_event.h>

#include <asm/mmu_context.h>
#include <asm/traps.h>

#define EXC_SUPERV_INSN_ACCESS 9  /* Supervisor only instruction address */
#define EXC_SUPERV_DATA_ACCESS 11 /* Supervisor only data address */
#define EXC_X_PROTECTION_FAULT 13 /* TLB permission violation (x) */
#define EXC_R_PROTECTION_FAULT 14 /* TLB permission violation (r) */
#define EXC_W_PROTECTION_FAULT 15 /* TLB permission violation (w) */

/*
 * This routine handles page faults. It determines the address and the
 * problem, and then passes it off to one of the appropriate routines.
 */
asmlinkage void do_page_fault(struct pt_regs *regs, unsigned long cause,
				unsigned long address)
{
	struct vm_area_struct *vma = NULL;
	struct task_struct *tsk = current;
	struct mm_struct *mm = tsk->mm;
	int code = SEGV_MAPERR;
	vm_fault_t fault;
	unsigned int flags = FAULT_FLAG_DEFAULT;

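	/*
	 * The low two bits of the value passed in are not part of the
	 * exception cause code; shift them away so 'cause' lines up
	 * with the EXC_* values above.
	 */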
	cause >>= 2;

	/*
	 * ea points past the faulting instruction; back it up by one
	 * 32-bit instruction so the access is retried once the fault
	 * has been handled.
	 */
	regs->ea -= 4;

	/*
	 * We fault-in kernel-space virtual memory on-demand. The
	 * 'reference' page table is init_mm.pgd.
	 *
	 * NOTE! We MUST NOT take any locks for this case. We may
	 * be in an interrupt or a critical region, and should
	 * only copy the information from the master page table,
	 * nothing more.
	 */
	if (unlikely(address >= VMALLOC_START && address <= VMALLOC_END)) {
		if (user_mode(regs))
			goto bad_area_nosemaphore;
		else
			goto vmalloc_fault;
	}

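	/*
	 * Any other address above TASK_SIZE has no vma to consult:
	 * user accesses get SIGSEGV, kernel accesses go through the
	 * exception fixup path below.
	 */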
	if (unlikely(address >= TASK_SIZE))
		goto bad_area_nosemaphore;

	/*
	 * If we're in an interrupt or have no user
	 * context, we must not take the fault..
	 */
	if (faulthandler_disabled() || !mm)
		goto bad_area_nosemaphore;

	if (user_mode(regs))
		flags |= FAULT_FLAG_USER;

	perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS, 1, regs, address);

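	/*
	 * A kernel-mode fault with no exception-table fixup for the
	 * faulting instruction is a kernel bug; bail out instead of
	 * sleeping on a lock that the faulting context may already
	 * hold.
	 */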
	if (!mmap_read_trylock(mm)) {
		if (!user_mode(regs) && !search_exception_tables(regs->ea))
			goto bad_area_nosemaphore;
retry:
		mmap_read_lock(mm);
	}

	vma = find_vma(mm, address);
	if (!vma)
		goto bad_area;
	if (vma->vm_start <= address)
		goto good_area;
	if (!(vma->vm_flags & VM_GROWSDOWN))
		goto bad_area;
	if (expand_stack(vma, address))
		goto bad_area;
	/*
	 * Ok, we have a good vm_area for this memory access, so
	 * we can handle it..
	 */
good_area:
	code = SEGV_ACCERR;

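	/*
	 * Check the vma's permissions against the hardware cause:
	 * supervisor-only accesses can never be satisfied from a user
	 * mapping, and each protection fault requires the matching
	 * VM_EXEC, VM_READ or VM_WRITE bit.
	 */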
	switch (cause) {
	case EXC_SUPERV_INSN_ACCESS:
		goto bad_area;
	case EXC_SUPERV_DATA_ACCESS:
		goto bad_area;
	case EXC_X_PROTECTION_FAULT:
		if (!(vma->vm_flags & VM_EXEC))
			goto bad_area;
		break;
	case EXC_R_PROTECTION_FAULT:
		if (!(vma->vm_flags & VM_READ))
			goto bad_area;
		break;
	case EXC_W_PROTECTION_FAULT:
		if (!(vma->vm_flags & VM_WRITE))
			goto bad_area;
		flags |= FAULT_FLAG_WRITE;
		break;
	}

	/*
	 * If for any reason at all we couldn't handle the fault,
	 * make sure we exit gracefully rather than endlessly redo
	 * the fault.
	 */
	fault = handle_mm_fault(vma, address, flags, regs);

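	/*
	 * If a signal interrupted the fault, handle_mm_fault() returned
	 * VM_FAULT_RETRY and already dropped the mmap lock, so a plain
	 * return is safe here.
	 */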
	if (fault_signal_pending(fault, regs))
		return;

	if (unlikely(fault & VM_FAULT_ERROR)) {
		if (fault & VM_FAULT_OOM)
			goto out_of_memory;
		else if (fault & VM_FAULT_SIGSEGV)
			goto bad_area;
		else if (fault & VM_FAULT_SIGBUS)
			goto do_sigbus;
		BUG();
	}

	if (flags & FAULT_FLAG_ALLOW_RETRY) {
		if (fault & VM_FAULT_RETRY) {
			flags |= FAULT_FLAG_TRIED;

			/*
			 * No need to mmap_read_unlock(mm) as we would
			 * have already released it in __lock_page_or_retry
			 * in mm/filemap.c.
			 */

			goto retry;
		}
	}

	mmap_read_unlock(mm);
	return;

/*
 * Something tried to access memory that isn't in our memory map..
 * Fix it, but check if it's kernel or user first..
 */
bad_area:
	mmap_read_unlock(mm);

bad_area_nosemaphore:
	/* User mode accesses just cause a SIGSEGV */
	if (user_mode(regs)) {
		if (unhandled_signal(current, SIGSEGV) && printk_ratelimit()) {
			pr_info("%s: unhandled page fault (%d) at 0x%08lx, "
				"cause %ld\n", current->comm, SIGSEGV, address, cause);
			show_regs(regs);
		}
		_exception(SIGSEGV, regs, code, address);
		return;
	}

no_context:
	/* Are we prepared to handle this kernel fault? */
	if (fixup_exception(regs))
		return;

	/*
	 * Oops. The kernel tried to access some bad page. We'll have to
	 * terminate things with extreme prejudice.
	 */
	bust_spinlocks(1);

	pr_alert("Unable to handle kernel %s at virtual address %08lx\n",
		address < PAGE_SIZE ? "NULL pointer dereference" :
		"paging request", address);
	pr_alert("ea = %08lx, ra = %08lx, cause = %ld\n", regs->ea, regs->ra,
		cause);
	panic("Oops");
	return;

/*
 * We ran out of memory, or some other thing happened to us that made
 * us unable to handle the page fault gracefully.
 */
out_of_memory:
	mmap_read_unlock(mm);
	if (!user_mode(regs))
		goto no_context;
	pagefault_out_of_memory();
	return;

do_sigbus:
	mmap_read_unlock(mm);

	/* Kernel mode? Handle exceptions or die */
	if (!user_mode(regs))
		goto no_context;

	_exception(SIGBUS, regs, BUS_ADRERR, address);
	return;

vmalloc_fault:
	{
		/*
		 * Synchronize this task's top level page-table
		 * with the 'reference' page table.
		 *
		 * Do _not_ use "tsk" here. We might be inside
		 * an interrupt in the middle of a task switch..
		 */
		int offset = pgd_index(address);
		pgd_t *pgd, *pgd_k;
		p4d_t *p4d, *p4d_k;
		pud_t *pud, *pud_k;
		pmd_t *pmd, *pmd_k;
		pte_t *pte_k;

		pgd = pgd_current + offset;
		pgd_k = init_mm.pgd + offset;

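		/*
		 * Walk both page tables in lockstep, copying the
		 * missing top-level entries from the reference table.
		 * If the reference table has no mapping either, the
		 * kernel touched an unmapped vmalloc address and this
		 * is a genuine bug.
		 */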
		if (!pgd_present(*pgd_k))
			goto no_context;
		set_pgd(pgd, *pgd_k);

		p4d = p4d_offset(pgd, address);
		p4d_k = p4d_offset(pgd_k, address);
		if (!p4d_present(*p4d_k))
			goto no_context;
		pud = pud_offset(p4d, address);
		pud_k = pud_offset(p4d_k, address);
		if (!pud_present(*pud_k))
			goto no_context;
		pmd = pmd_offset(pud, address);
		pmd_k = pmd_offset(pud_k, address);
		if (!pmd_present(*pmd_k))
			goto no_context;
		set_pmd(pmd, *pmd_k);

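		/*
		 * The pmd entry copied above points at the shared kernel
		 * pte table, so nothing needs copying at the pte level;
		 * just check that the entry is there and drop any stale
		 * TLB entry for this address.
		 */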
		pte_k = pte_offset_kernel(pmd_k, address);
		if (!pte_present(*pte_k))
			goto no_context;

		flush_tlb_kernel_page(address);
		return;
	}
}