// SPDX-License-Identifier: GPL-2.0
/*
 * linux/arch/alpha/mm/fault.c
 *
 * Copyright (C) 1995 Linus Torvalds
 */

#include <linux/sched/signal.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <asm/io.h>

#define __EXTERN_INLINE inline
#include <asm/mmu_context.h>
#include <asm/tlbflush.h>
#undef __EXTERN_INLINE
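
/* A sketch of the intent here: __EXTERN_INLINE normally expands to
   "extern inline" (GNU89 semantics: no out-of-line copy is emitted).
   Redefining it to plain "inline" before including the headers above
   makes this file the one translation unit that emits real, linkable
   definitions of the affected mmu_context/tlbflush helpers.  */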

#include <linux/signal.h>
#include <linux/errno.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/ptrace.h>
#include <linux/mman.h>
#include <linux/smp.h>
#include <linux/interrupt.h>
#include <linux/extable.h>
#include <linux/uaccess.h>
#include <linux/perf_event.h>

extern void die_if_kernel(char *, struct pt_regs *, long, unsigned long *);


/*
 * Force a new ASN for a task.
 */

#ifndef CONFIG_SMP
unsigned long last_asn = ASN_FIRST_VERSION;
#endif
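
/* In outline (see asm/mmu_context.h): a context value packs a
   generation number in the bits above HARDWARE_ASN_MASK, with the
   hardware ASN in the low bits.  When __get_new_mm_context() runs out
   of ASNs it flushes the TLB and bumps the generation, implicitly
   invalidating every context handed out under the old one.  */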

void
__load_new_mm_context(struct mm_struct *next_mm)
{
	unsigned long mmc;
	struct pcb_struct *pcb;

	mmc = __get_new_mm_context(next_mm, smp_processor_id());
	next_mm->context[smp_processor_id()] = mmc;

	pcb = &current_thread_info()->pcb;
	pcb->asn = mmc & HARDWARE_ASN_MASK;
	pcb->ptbr = ((unsigned long) next_mm->pgd - IDENT_ADDR) >> PAGE_SHIFT;

	__reload_thread(pcb);
}
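
/* This is used by the activate_mm() paths in asm/mmu_context.h: it
   gives the incoming mm a fresh ASN on this CPU and loads the new PCB
   (ASN plus page-table base) into the hardware.  */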


/*
 * This routine handles page faults.  It determines the address,
 * and the problem, and then passes it off to handle_mm_fault().
 *
 * mmcsr:
 *	0 = translation not valid
 *	1 = access violation
 *	2 = fault-on-read
 *	3 = fault-on-execute
 *	4 = fault-on-write
 *
 * cause:
 *	-1 = instruction fetch
 *	0 = load
 *	1 = store
 *
 * Registers $9 through $15 are saved in a block just prior to `regs' and
 * are saved and restored around the call to allow exception code to
 * modify them.
 */

/* Macro for exception fixup code to access integer registers.  */
#define dpf_reg(r)							\
	(((unsigned long *)regs)[(r) <= 8 ? (r) : (r) <= 15 ? (r)-16 :	\
				 (r) <= 18 ? (r)+10 : (r)-10])
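
/* The index expression mirrors the struct pt_regs layout: $0-$8 occupy
   the first nine slots; $19-$28 follow immediately, hence (r)-10;
   $16-$18 are saved by the PALcode at the tail of the frame, hence
   (r)+10; and $9-$15 live in the seven words just below regs (see the
   comment above), giving the negative (r)-16 indices.  */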

asmlinkage void
do_page_fault(unsigned long address, unsigned long mmcsr,
	      long cause, struct pt_regs *regs)
{
	struct vm_area_struct * vma;
	struct mm_struct *mm = current->mm;
	const struct exception_table_entry *fixup;
	int si_code = SEGV_MAPERR;
	vm_fault_t fault;
	unsigned int flags = FAULT_FLAG_DEFAULT;

	/* As of EV6, a load into $31/$f31 is a prefetch, and never faults
	   (or is suppressed by the PALcode).  Support that for older CPUs
	   by ignoring such an instruction.  */
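	/* In the check below, insn<25:21> is the ra field (0x1f selects
	   $31/$f31) and insn<31:26> is the opcode; the 0x30f00001400
	   mask has one bit set for each of the eight load opcodes
	   listed.  */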
	if (cause == 0) {
		unsigned int insn;
		__get_user(insn, (unsigned int __user *)regs->pc);
		if ((insn >> 21 & 0x1f) == 0x1f &&
		    /* ldq ldl ldt lds ldg ldf ldwu ldbu */
		    (1ul << (insn >> 26) & 0x30f00001400ul)) {
			regs->pc += 4;
			return;
		}
	}

	/* If we're in an interrupt context, or have no user context,
	   we must not take the fault.  */
	if (!mm || faulthandler_disabled())
		goto no_context;

#ifdef CONFIG_ALPHA_LARGE_VMALLOC
	if (address >= TASK_SIZE)
		goto vmalloc_fault;
#endif
	if (user_mode(regs))
		flags |= FAULT_FLAG_USER;
	perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS, 1, regs, address);
retry:
	mmap_read_lock(mm);
	vma = find_vma(mm, address);
	if (!vma)
		goto bad_area;
	if (vma->vm_start <= address)
		goto good_area;
	if (!(vma->vm_flags & VM_GROWSDOWN))
		goto bad_area;
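	/* The nearest vma is above the address but may grow down; try
	   to extend it to cover the faulting address.  */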
	if (expand_stack(vma, address))
		goto bad_area;

	/* Ok, we have a good vm_area for this memory access, so
	   we can handle it.  */
 good_area:
	si_code = SEGV_ACCERR;
	if (cause < 0) {
		if (!(vma->vm_flags & VM_EXEC))
			goto bad_area;
	} else if (!cause) {
		/* Allow reads even for write-only mappings */
		if (!(vma->vm_flags & (VM_READ | VM_WRITE)))
			goto bad_area;
	} else {
		if (!(vma->vm_flags & VM_WRITE))
			goto bad_area;
		flags |= FAULT_FLAG_WRITE;
	}

	/* If for any reason at all we couldn't handle the fault,
	   make sure we exit gracefully rather than endlessly redo
	   the fault.  */
	fault = handle_mm_fault(vma, address, flags, regs);
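	/* Since regs was passed in, handle_mm_fault() has already done
	   the PERF_COUNT_SW_PAGE_FAULTS_{MAJ,MIN} accounting for this
	   fault on our behalf.  */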

	if (fault_signal_pending(fault, regs))
		return;

	if (unlikely(fault & VM_FAULT_ERROR)) {
		if (fault & VM_FAULT_OOM)
			goto out_of_memory;
		else if (fault & VM_FAULT_SIGSEGV)
			goto bad_area;
		else if (fault & VM_FAULT_SIGBUS)
			goto do_sigbus;
		BUG();
	}

	if (flags & FAULT_FLAG_ALLOW_RETRY) {
		if (fault & VM_FAULT_RETRY) {
			flags |= FAULT_FLAG_TRIED;

			/* No need to mmap_read_unlock(mm) as we would
			 * have already released it in __lock_page_or_retry
			 * in mm/filemap.c.
			 */

			goto retry;
		}
	}

	mmap_read_unlock(mm);

	return;

	/* Something tried to access memory that isn't in our memory map.
	   Fix it, but check if it's kernel or user first.  */
 bad_area:
	mmap_read_unlock(mm);

	if (user_mode(regs))
		goto do_sigsegv;

 no_context:
	/* Are we prepared to handle this fault as an exception?  */
	if ((fixup = search_exception_tables(regs->pc)) != 0) {
		unsigned long newpc;
		newpc = fixup_exception(dpf_reg, fixup, regs->pc);
		regs->pc = newpc;
		return;
	}

	/* Oops. The kernel tried to access some bad page. We'll have to
	   terminate things with extreme prejudice.  */
	printk(KERN_ALERT "Unable to handle kernel paging request at "
	       "virtual address %016lx\n", address);
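	/* regs - 16 points just below the saved $9-$15 block (which
	   lives at regs[-7]..regs[-1]), so die_if_kernel() can index
	   it naturally as r9_15[9]..r9_15[15].  */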
	die_if_kernel("Oops", regs, cause, (unsigned long*)regs - 16);
	do_exit(SIGKILL);

	/* We ran out of memory, or some other thing happened to us that
	   made us unable to handle the page fault gracefully.  */
 out_of_memory:
	mmap_read_unlock(mm);
	if (!user_mode(regs))
		goto no_context;
	pagefault_out_of_memory();
	return;

 do_sigbus:
	mmap_read_unlock(mm);
	/* Send a sigbus, regardless of whether we were in kernel
	   or user mode.  */
	force_sig_fault(SIGBUS, BUS_ADRERR, (void __user *) address, 0);
	if (!user_mode(regs))
		goto no_context;
	return;

 do_sigsegv:
	force_sig_fault(SIGSEGV, si_code, (void __user *) address, 0);
	return;

#ifdef CONFIG_ALPHA_LARGE_VMALLOC
 vmalloc_fault:
	if (user_mode(regs))
		goto do_sigsegv;
	else {
		/* Synchronize this task's top level page-table
		   with the "reference" page table from init.  */
		long index = pgd_index(address);
		pgd_t *pgd, *pgd_k;

		pgd = current->active_mm->pgd + index;
		pgd_k = swapper_pg_dir + index;
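		/* Copying the single top-level entry is enough: the
		   lower-level page tables themselves are shared with
		   swapper_pg_dir, and the faulting access simply
		   retries once the entry is in place.  */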
		if (!pgd_present(*pgd) && pgd_present(*pgd_k)) {
			pgd_val(*pgd) = pgd_val(*pgd_k);
			return;
		}
		goto no_context;
	}
#endif
}