^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1) // SPDX-License-Identifier: GPL-2.0
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3) * S390 version
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4) * Copyright IBM Corp. 1999
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5) * Author(s): Hartmut Penner (hp@de.ibm.com)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6) * Ulrich Weigand (uweigand@de.ibm.com)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8) * Derived from "arch/i386/mm/fault.c"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9) * Copyright (C) 1995 Linus Torvalds
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12) #include <linux/kernel_stat.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13) #include <linux/perf_event.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14) #include <linux/signal.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15) #include <linux/sched.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 16) #include <linux/sched/debug.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 17) #include <linux/kernel.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 18) #include <linux/errno.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 19) #include <linux/string.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 20) #include <linux/types.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 21) #include <linux/ptrace.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 22) #include <linux/mman.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 23) #include <linux/mm.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 24) #include <linux/compat.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 25) #include <linux/smp.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 26) #include <linux/kdebug.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 27) #include <linux/init.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 28) #include <linux/console.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 29) #include <linux/extable.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 30) #include <linux/hardirq.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 31) #include <linux/kprobes.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 32) #include <linux/uaccess.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 33) #include <linux/hugetlb.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 34) #include <asm/asm-offsets.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 35) #include <asm/diag.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 36) #include <asm/gmap.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 37) #include <asm/irq.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 38) #include <asm/mmu_context.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 39) #include <asm/facility.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 40) #include <asm/uv.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 41) #include "../kernel/entry.h"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 42)
/* Mask off the low 12 TEID bits to get the page-aligned failing address. */
#define __FAIL_ADDR_MASK -4096L
/* NOTE(review): not used in this chunk — presumably masks the pfault
 * completion-interrupt subcode; confirm against the pfault handler. */
#define __SUBCODE_MASK 0x0600
/* NOTE(review): not used in this chunk — looks like the pfault token
 * "resolved" bit; confirm against the pfault handler. */
#define __PF_RES_FIELD 0x8000000000000000ULL

/*
 * File-private vm_fault_t codes, in addition to the generic VM_FAULT_*
 * values; consumed by do_fault_error().
 */
#define VM_FAULT_BADCONTEXT ((__force vm_fault_t) 0x010000)
#define VM_FAULT_BADMAP ((__force vm_fault_t) 0x020000)
#define VM_FAULT_BADACCESS ((__force vm_fault_t) 0x040000)
#define VM_FAULT_SIGNAL ((__force vm_fault_t) 0x080000)
#define VM_FAULT_PFAULT ((__force vm_fault_t) 0x100000)

/* Which address space the faulting access was made in. */
enum fault_type {
	KERNEL_FAULT,
	USER_FAULT,
	VDSO_FAULT,
	GMAP_FAULT,
};

/*
 * TEID bits that indicate a store access; set to 0xc00 by fault_init()
 * when facility 75 is installed, otherwise stays 0 (no store indication).
 */
static unsigned long store_indication __read_mostly;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 61)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 62) static int __init fault_init(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 63) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 64) if (test_facility(75))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 65) store_indication = 0xc00;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 66) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 67) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 68) early_initcall(fault_init);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 69)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 70) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 71) * Find out which address space caused the exception.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 72) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 73) static enum fault_type get_fault_type(struct pt_regs *regs)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 74) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 75) unsigned long trans_exc_code;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 76)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 77) trans_exc_code = regs->int_parm_long & 3;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 78) if (likely(trans_exc_code == 0)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 79) /* primary space exception */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 80) if (IS_ENABLED(CONFIG_PGSTE) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 81) test_pt_regs_flag(regs, PIF_GUEST_FAULT))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 82) return GMAP_FAULT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 83) if (current->thread.mm_segment == USER_DS)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 84) return USER_FAULT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 85) return KERNEL_FAULT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 86) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 87) if (trans_exc_code == 2) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 88) /* secondary space exception */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 89) if (current->thread.mm_segment & 1) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 90) if (current->thread.mm_segment == USER_DS_SACF)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 91) return USER_FAULT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 92) return KERNEL_FAULT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 93) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 94) return VDSO_FAULT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 95) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 96) if (trans_exc_code == 1) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 97) /* access register mode, not used in the kernel */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 98) return USER_FAULT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 99) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 100) /* home space exception -> access via kernel ASCE */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 101) return KERNEL_FAULT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 102) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 103)
/*
 * Probe whether a (page table) address can be read without faulting.
 * Returns non-zero when the read faults, i.e. the address is bad.
 */
static int bad_address(void *p)
{
	unsigned long dummy;

	return get_kernel_nofault(dummy, (unsigned long *)p);
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 110)
/*
 * Walk the page table for @address starting at @asce and print one entry
 * per translation level (R1/R2/R3/S/P).  The switch enters at the level
 * given by the ASCE type and falls through all lower levels.  The walk
 * stops early at an invalid (or large/huge) entry, and prints "BAD" if a
 * table entry itself cannot be read.
 */
static void dump_pagetable(unsigned long asce, unsigned long address)
{
	unsigned long *table = __va(asce & _ASCE_ORIGIN);

	pr_alert("AS:%016lx ", asce);
	switch (asce & _ASCE_TYPE_MASK) {
	case _ASCE_TYPE_REGION1:
		table += (address & _REGION1_INDEX) >> _REGION1_SHIFT;
		if (bad_address(table))
			goto bad;
		pr_cont("R1:%016lx ", *table);
		if (*table & _REGION_ENTRY_INVALID)
			goto out;
		/* descend to the region-second table */
		table = (unsigned long *)(*table & _REGION_ENTRY_ORIGIN);
		fallthrough;
	case _ASCE_TYPE_REGION2:
		table += (address & _REGION2_INDEX) >> _REGION2_SHIFT;
		if (bad_address(table))
			goto bad;
		pr_cont("R2:%016lx ", *table);
		if (*table & _REGION_ENTRY_INVALID)
			goto out;
		/* descend to the region-third table */
		table = (unsigned long *)(*table & _REGION_ENTRY_ORIGIN);
		fallthrough;
	case _ASCE_TYPE_REGION3:
		table += (address & _REGION3_INDEX) >> _REGION3_SHIFT;
		if (bad_address(table))
			goto bad;
		pr_cont("R3:%016lx ", *table);
		/* a large region-third entry maps the page directly - done */
		if (*table & (_REGION_ENTRY_INVALID | _REGION3_ENTRY_LARGE))
			goto out;
		table = (unsigned long *)(*table & _REGION_ENTRY_ORIGIN);
		fallthrough;
	case _ASCE_TYPE_SEGMENT:
		table += (address & _SEGMENT_INDEX) >> _SEGMENT_SHIFT;
		if (bad_address(table))
			goto bad;
		pr_cont("S:%016lx ", *table);
		/* a large segment entry maps the page directly - done */
		if (*table & (_SEGMENT_ENTRY_INVALID | _SEGMENT_ENTRY_LARGE))
			goto out;
		table = (unsigned long *)(*table & _SEGMENT_ENTRY_ORIGIN);
	}
	/* final level: the page table entry */
	table += (address & _PAGE_INDEX) >> _PAGE_SHIFT;
	if (bad_address(table))
		goto bad;
	pr_cont("P:%016lx ", *table);
out:
	pr_cont("\n");
	return;
bad:
	pr_cont("BAD\n");
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 163)
/*
 * Print the failing address and TEID, which address space the fault
 * happened in, which ASCE was in use, and dump the corresponding page
 * table walk.  Called for unhandled user faults and kernel oopses.
 */
static void dump_fault_info(struct pt_regs *regs)
{
	unsigned long asce;

	pr_alert("Failing address: %016lx TEID: %016lx\n",
		 regs->int_parm_long & __FAIL_ADDR_MASK, regs->int_parm_long);
	pr_alert("Fault in ");
	/* TEID bits 62-63: address-space indication */
	switch (regs->int_parm_long & 3) {
	case 3:
		pr_cont("home space ");
		break;
	case 2:
		pr_cont("secondary space ");
		break;
	case 1:
		pr_cont("access register ");
		break;
	case 0:
		pr_cont("primary space ");
		break;
	}
	pr_cont("mode while using ");
	/* pick the ASCE that matches the fault classification */
	switch (get_fault_type(regs)) {
	case USER_FAULT:
		asce = S390_lowcore.user_asce;
		pr_cont("user ");
		break;
	case VDSO_FAULT:
		asce = S390_lowcore.vdso_asce;
		pr_cont("vdso ");
		break;
	case GMAP_FAULT:
		asce = ((struct gmap *) S390_lowcore.gmap)->asce;
		pr_cont("gmap ");
		break;
	case KERNEL_FAULT:
		asce = S390_lowcore.kernel_asce;
		pr_cont("kernel ");
		break;
	default:
		/* get_fault_type() returns only the four values above */
		unreachable();
	}
	pr_cont("ASCE.\n");
	dump_pagetable(asce, regs->int_parm_long & __FAIL_ADDR_MASK);
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 209)
/* Non-zero: log unhandled fault signals of user processes (checked in report_user_fault()). */
int show_unhandled_signals = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 211)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 212) void report_user_fault(struct pt_regs *regs, long signr, int is_mm_fault)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 213) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 214) if ((task_pid_nr(current) > 1) && !show_unhandled_signals)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 215) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 216) if (!unhandled_signal(current, signr))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 217) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 218) if (!printk_ratelimit())
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 219) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 220) printk(KERN_ALERT "User process fault: interruption code %04x ilc:%d ",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 221) regs->int_code & 0xffff, regs->int_code >> 17);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 222) print_vma_addr(KERN_CONT "in ", regs->psw.addr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 223) printk(KERN_CONT "\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 224) if (is_mm_fault)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 225) dump_fault_info(regs);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 226) show_regs(regs);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 227) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 228)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 229) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 230) * Send SIGSEGV to task. This is an external routine
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 231) * to keep the stack usage of do_page_fault small.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 232) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 233) static noinline void do_sigsegv(struct pt_regs *regs, int si_code)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 234) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 235) report_user_fault(regs, SIGSEGV, 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 236) force_sig_fault(SIGSEGV, si_code,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 237) (void __user *)(regs->int_parm_long & __FAIL_ADDR_MASK));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 238) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 239)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 240) const struct exception_table_entry *s390_search_extables(unsigned long addr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 241) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 242) const struct exception_table_entry *fixup;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 243)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 244) fixup = search_extable(__start_dma_ex_table,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 245) __stop_dma_ex_table - __start_dma_ex_table,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 246) addr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 247) if (!fixup)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 248) fixup = search_exception_tables(addr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 249) return fixup;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 250) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 251)
/*
 * Handle a fault for which there is no usable user context: try an
 * exception table fixup for the faulting PSW address first; if none
 * applies, print diagnostics and oops.
 */
static noinline void do_no_context(struct pt_regs *regs)
{
	const struct exception_table_entry *fixup;

	/* Are we prepared to handle this kernel fault? */
	fixup = s390_search_extables(regs->psw.addr);
	if (fixup && ex_handle(fixup, regs))
		return;

	/*
	 * Oops. The kernel tried to access some bad page. We'll have to
	 * terminate things with extreme prejudice.
	 */
	if (get_fault_type(regs) == KERNEL_FAULT)
		printk(KERN_ALERT "Unable to handle kernel pointer dereference"
		       " in virtual kernel address space\n");
	else
		printk(KERN_ALERT "Unable to handle kernel paging request"
		       " in virtual user address space\n");
	dump_fault_info(regs);
	die(regs, "Oops");
	/* kill the current task once the oops has been printed */
	do_exit(SIGKILL);
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 275)
/*
 * Low-address protection was hit.  In kernel mode this means a NULL
 * pointer write access and is handled as a normal no-context fault;
 * in user mode it 'cannot happen' and is fatal.
 */
static noinline void do_low_address(struct pt_regs *regs)
{
	if (regs->psw.mask & PSW_MASK_PSTATE) {
		/* Low-address protection hit in user mode 'cannot happen'. */
		die (regs, "Low-address protection");
		do_exit(SIGKILL);
	}

	/* kernel mode: NULL pointer write access -> oops/fixup path */
	do_no_context(regs);
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 288)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 289) static noinline void do_sigbus(struct pt_regs *regs)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 290) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 291) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 292) * Send a sigbus, regardless of whether we were in kernel
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 293) * or user mode.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 294) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 295) force_sig_fault(SIGBUS, BUS_ADRERR,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 296) (void __user *)(regs->int_parm_long & __FAIL_ADDR_MASK));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 297) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 298)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 299) static noinline int signal_return(struct pt_regs *regs)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 300) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 301) u16 instruction;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 302) int rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 303)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 304) rc = __get_user(instruction, (u16 __user *) regs->psw.addr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 305) if (rc)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 306) return rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 307) if (instruction == 0x0a77) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 308) set_pt_regs_flag(regs, PIF_SYSCALL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 309) regs->int_code = 0x00040077;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 310) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 311) } else if (instruction == 0x0aad) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 312) set_pt_regs_flag(regs, PIF_SYSCALL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 313) regs->int_code = 0x000400ad;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 314) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 315) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 316) return -EACCES;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 317) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 318)
/*
 * Turn a failed fault handling result into the proper action: a signal
 * for user mode, exception fixup or oops for kernel mode.
 * @access: the VM_* permission the faulting access required
 * @fault:  generic VM_FAULT_* code or one of the file-private codes
 */
static noinline void do_fault_error(struct pt_regs *regs, int access,
				    vm_fault_t fault)
{
	int si_code;

	switch (fault) {
	case VM_FAULT_BADACCESS:
		/*
		 * An execute fault may be a signal-return trampoline;
		 * signal_return() == 0 means it was restarted as a syscall.
		 */
		if (access == VM_EXEC && signal_return(regs) == 0)
			break;
		fallthrough;
	case VM_FAULT_BADMAP:
		/* Bad memory access. Check if it is kernel or user space. */
		if (user_mode(regs)) {
			/* User mode accesses just cause a SIGSEGV */
			si_code = (fault == VM_FAULT_BADMAP) ?
				SEGV_MAPERR : SEGV_ACCERR;
			do_sigsegv(regs, si_code);
			break;
		}
		/* kernel-mode bad access falls through to the oops path */
		fallthrough;
	case VM_FAULT_BADCONTEXT:
	case VM_FAULT_PFAULT:
		do_no_context(regs);
		break;
	case VM_FAULT_SIGNAL:
		/* fault handling was interrupted by a pending signal */
		if (!user_mode(regs))
			do_no_context(regs);
		break;
	default: /* fault & VM_FAULT_ERROR */
		if (fault & VM_FAULT_OOM) {
			if (!user_mode(regs))
				do_no_context(regs);
			else
				pagefault_out_of_memory();
		} else if (fault & VM_FAULT_SIGSEGV) {
			/* Kernel mode? Handle exceptions or die */
			if (!user_mode(regs))
				do_no_context(regs);
			else
				do_sigsegv(regs, SEGV_MAPERR);
		} else if (fault & VM_FAULT_SIGBUS) {
			/* Kernel mode? Handle exceptions or die */
			if (!user_mode(regs))
				do_no_context(regs);
			else
				do_sigbus(regs);
		} else
			BUG();
		break;
	}
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 370)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 371) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 372) * This routine handles page faults. It determines the address,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 373) * and the problem, and then passes it off to one of the appropriate
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 374) * routines.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 375) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 376) * interruption code (int_code):
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 377) * 04 Protection -> Write-Protection (suppression)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 378) * 10 Segment translation -> Not present (nullification)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 379) * 11 Page translation -> Not present (nullification)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 380) * 3b Region third trans. -> Not present (nullification)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 381) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 382) static inline vm_fault_t do_exception(struct pt_regs *regs, int access)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 383) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 384) struct gmap *gmap;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 385) struct task_struct *tsk;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 386) struct mm_struct *mm;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 387) struct vm_area_struct *vma;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 388) enum fault_type type;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 389) unsigned long trans_exc_code;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 390) unsigned long address;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 391) unsigned int flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 392) vm_fault_t fault;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 393)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 394) tsk = current;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 395) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 396) * The instruction that caused the program check has
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 397) * been nullified. Don't signal single step via SIGTRAP.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 398) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 399) clear_pt_regs_flag(regs, PIF_PER_TRAP);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 400)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 401) if (kprobe_page_fault(regs, 14))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 402) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 403)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 404) mm = tsk->mm;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 405) trans_exc_code = regs->int_parm_long;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 406)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 407) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 408) * Verify that the fault happened in user space, that
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 409) * we are not in an interrupt and that there is a
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 410) * user context.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 411) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 412) fault = VM_FAULT_BADCONTEXT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 413) type = get_fault_type(regs);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 414) switch (type) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 415) case KERNEL_FAULT:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 416) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 417) case VDSO_FAULT:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 418) fault = VM_FAULT_BADMAP;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 419) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 420) case USER_FAULT:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 421) case GMAP_FAULT:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 422) if (faulthandler_disabled() || !mm)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 423) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 424) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 425) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 426)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 427) address = trans_exc_code & __FAIL_ADDR_MASK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 428) perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS, 1, regs, address);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 429) flags = FAULT_FLAG_DEFAULT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 430) if (user_mode(regs))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 431) flags |= FAULT_FLAG_USER;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 432) if (access == VM_WRITE || (trans_exc_code & store_indication) == 0x400)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 433) flags |= FAULT_FLAG_WRITE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 434) mmap_read_lock(mm);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 435)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 436) gmap = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 437) if (IS_ENABLED(CONFIG_PGSTE) && type == GMAP_FAULT) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 438) gmap = (struct gmap *) S390_lowcore.gmap;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 439) current->thread.gmap_addr = address;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 440) current->thread.gmap_write_flag = !!(flags & FAULT_FLAG_WRITE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 441) current->thread.gmap_int_code = regs->int_code & 0xffff;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 442) address = __gmap_translate(gmap, address);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 443) if (address == -EFAULT) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 444) fault = VM_FAULT_BADMAP;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 445) goto out_up;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 446) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 447) if (gmap->pfault_enabled)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 448) flags |= FAULT_FLAG_RETRY_NOWAIT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 449) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 450)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 451) retry:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 452) fault = VM_FAULT_BADMAP;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 453) vma = find_vma(mm, address);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 454) if (!vma)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 455) goto out_up;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 456)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 457) if (unlikely(vma->vm_start > address)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 458) if (!(vma->vm_flags & VM_GROWSDOWN))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 459) goto out_up;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 460) if (expand_stack(vma, address))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 461) goto out_up;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 462) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 463)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 464) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 465) * Ok, we have a good vm_area for this memory access, so
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 466) * we can handle it..
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 467) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 468) fault = VM_FAULT_BADACCESS;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 469) if (unlikely(!(vma->vm_flags & access)))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 470) goto out_up;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 471)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 472) if (is_vm_hugetlb_page(vma))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 473) address &= HPAGE_MASK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 474) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 475) * If for any reason at all we couldn't handle the fault,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 476) * make sure we exit gracefully rather than endlessly redo
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 477) * the fault.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 478) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 479) fault = handle_mm_fault(vma, address, flags, regs);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 480) if (fault_signal_pending(fault, regs)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 481) fault = VM_FAULT_SIGNAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 482) if (flags & FAULT_FLAG_RETRY_NOWAIT)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 483) goto out_up;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 484) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 485) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 486) if (unlikely(fault & VM_FAULT_ERROR))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 487) goto out_up;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 488)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 489) if (flags & FAULT_FLAG_ALLOW_RETRY) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 490) if (fault & VM_FAULT_RETRY) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 491) if (IS_ENABLED(CONFIG_PGSTE) && gmap &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 492) (flags & FAULT_FLAG_RETRY_NOWAIT)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 493) /* FAULT_FLAG_RETRY_NOWAIT has been set,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 494) * mmap_lock has not been released */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 495) current->thread.gmap_pfault = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 496) fault = VM_FAULT_PFAULT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 497) goto out_up;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 498) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 499) flags &= ~FAULT_FLAG_RETRY_NOWAIT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 500) flags |= FAULT_FLAG_TRIED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 501) mmap_read_lock(mm);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 502) goto retry;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 503) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 504) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 505) if (IS_ENABLED(CONFIG_PGSTE) && gmap) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 506) address = __gmap_link(gmap, current->thread.gmap_addr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 507) address);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 508) if (address == -EFAULT) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 509) fault = VM_FAULT_BADMAP;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 510) goto out_up;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 511) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 512) if (address == -ENOMEM) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 513) fault = VM_FAULT_OOM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 514) goto out_up;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 515) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 516) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 517) fault = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 518) out_up:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 519) mmap_read_unlock(mm);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 520) out:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 521) return fault;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 522) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 523)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 524) void do_protection_exception(struct pt_regs *regs)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 525) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 526) unsigned long trans_exc_code;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 527) int access;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 528) vm_fault_t fault;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 529)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 530) trans_exc_code = regs->int_parm_long;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 531) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 532) * Protection exceptions are suppressing, decrement psw address.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 533) * The exception to this rule are aborted transactions, for these
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 534) * the PSW already points to the correct location.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 535) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 536) if (!(regs->int_code & 0x200))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 537) regs->psw.addr = __rewind_psw(regs->psw, regs->int_code >> 16);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 538) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 539) * Check for low-address protection. This needs to be treated
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 540) * as a special case because the translation exception code
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 541) * field is not guaranteed to contain valid data in this case.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 542) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 543) if (unlikely(!(trans_exc_code & 4))) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 544) do_low_address(regs);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 545) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 546) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 547) if (unlikely(MACHINE_HAS_NX && (trans_exc_code & 0x80))) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 548) regs->int_parm_long = (trans_exc_code & ~PAGE_MASK) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 549) (regs->psw.addr & PAGE_MASK);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 550) access = VM_EXEC;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 551) fault = VM_FAULT_BADACCESS;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 552) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 553) access = VM_WRITE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 554) fault = do_exception(regs, access);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 555) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 556) if (unlikely(fault))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 557) do_fault_error(regs, access, fault);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 558) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 559) NOKPROBE_SYMBOL(do_protection_exception);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 560)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 561) void do_dat_exception(struct pt_regs *regs)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 562) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 563) int access;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 564) vm_fault_t fault;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 565)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 566) access = VM_ACCESS_FLAGS;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 567) fault = do_exception(regs, access);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 568) if (unlikely(fault))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 569) do_fault_error(regs, access, fault);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 570) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 571) NOKPROBE_SYMBOL(do_dat_exception);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 572)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 573) #ifdef CONFIG_PFAULT
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 574) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 575) * 'pfault' pseudo page faults routines.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 576) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 577) static int pfault_disable;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 578)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 579) static int __init nopfault(char *str)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 580) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 581) pfault_disable = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 582) return 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 583) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 584)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 585) __setup("nopfault", nopfault);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 586)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 587) struct pfault_refbk {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 588) u16 refdiagc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 589) u16 reffcode;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 590) u16 refdwlen;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 591) u16 refversn;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 592) u64 refgaddr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 593) u64 refselmk;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 594) u64 refcmpmk;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 595) u64 reserved;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 596) } __attribute__ ((packed, aligned(8)));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 597)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 598) static struct pfault_refbk pfault_init_refbk = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 599) .refdiagc = 0x258,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 600) .reffcode = 0,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 601) .refdwlen = 5,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 602) .refversn = 2,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 603) .refgaddr = __LC_LPP,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 604) .refselmk = 1ULL << 48,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 605) .refcmpmk = 1ULL << 48,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 606) .reserved = __PF_RES_FIELD
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 607) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 608)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 609) int pfault_init(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 610) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 611) int rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 612)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 613) if (pfault_disable)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 614) return -1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 615) diag_stat_inc(DIAG_STAT_X258);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 616) asm volatile(
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 617) " diag %1,%0,0x258\n"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 618) "0: j 2f\n"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 619) "1: la %0,8\n"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 620) "2:\n"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 621) EX_TABLE(0b,1b)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 622) : "=d" (rc)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 623) : "a" (&pfault_init_refbk), "m" (pfault_init_refbk) : "cc");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 624) return rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 625) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 626)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 627) static struct pfault_refbk pfault_fini_refbk = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 628) .refdiagc = 0x258,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 629) .reffcode = 1,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 630) .refdwlen = 5,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 631) .refversn = 2,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 632) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 633)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 634) void pfault_fini(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 635) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 636)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 637) if (pfault_disable)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 638) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 639) diag_stat_inc(DIAG_STAT_X258);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 640) asm volatile(
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 641) " diag %0,0,0x258\n"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 642) "0: nopr %%r7\n"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 643) EX_TABLE(0b,0b)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 644) : : "a" (&pfault_fini_refbk), "m" (pfault_fini_refbk) : "cc");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 645) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 646)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 647) static DEFINE_SPINLOCK(pfault_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 648) static LIST_HEAD(pfault_list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 649)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 650) #define PF_COMPLETE 0x0080
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 651)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 652) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 653) * The mechanism of our pfault code: if Linux is running as guest, runs a user
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 654) * space process and the user space process accesses a page that the host has
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 655) * paged out we get a pfault interrupt.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 656) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 657) * This allows us, within the guest, to schedule a different process. Without
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 658) * this mechanism the host would have to suspend the whole virtual cpu until
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 659) * the page has been paged in.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 660) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 661) * So when we get such an interrupt then we set the state of the current task
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 662) * to uninterruptible and also set the need_resched flag. Both happens within
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 663) * interrupt context(!). If we later on want to return to user space we
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 664) * recognize the need_resched flag and then call schedule(). It's not very
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 665) * obvious how this works...
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 666) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 667) * Of course we have a lot of additional fun with the completion interrupt (->
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 668) * host signals that a page of a process has been paged in and the process can
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 669) * continue to run). This interrupt can arrive on any cpu and, since we have
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 670) * virtual cpus, actually appear before the interrupt that signals that a page
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 671) * is missing.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 672) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 673) static void pfault_interrupt(struct ext_code ext_code,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 674) unsigned int param32, unsigned long param64)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 675) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 676) struct task_struct *tsk;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 677) __u16 subcode;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 678) pid_t pid;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 679)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 680) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 681) * Get the external interruption subcode & pfault initial/completion
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 682) * signal bit. VM stores this in the 'cpu address' field associated
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 683) * with the external interrupt.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 684) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 685) subcode = ext_code.subcode;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 686) if ((subcode & 0xff00) != __SUBCODE_MASK)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 687) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 688) inc_irq_stat(IRQEXT_PFL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 689) /* Get the token (= pid of the affected task). */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 690) pid = param64 & LPP_PID_MASK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 691) rcu_read_lock();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 692) tsk = find_task_by_pid_ns(pid, &init_pid_ns);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 693) if (tsk)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 694) get_task_struct(tsk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 695) rcu_read_unlock();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 696) if (!tsk)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 697) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 698) spin_lock(&pfault_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 699) if (subcode & PF_COMPLETE) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 700) /* signal bit is set -> a page has been swapped in by VM */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 701) if (tsk->thread.pfault_wait == 1) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 702) /* Initial interrupt was faster than the completion
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 703) * interrupt. pfault_wait is valid. Set pfault_wait
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 704) * back to zero and wake up the process. This can
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 705) * safely be done because the task is still sleeping
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 706) * and can't produce new pfaults. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 707) tsk->thread.pfault_wait = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 708) list_del(&tsk->thread.list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 709) wake_up_process(tsk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 710) put_task_struct(tsk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 711) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 712) /* Completion interrupt was faster than initial
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 713) * interrupt. Set pfault_wait to -1 so the initial
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 714) * interrupt doesn't put the task to sleep.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 715) * If the task is not running, ignore the completion
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 716) * interrupt since it must be a leftover of a PFAULT
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 717) * CANCEL operation which didn't remove all pending
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 718) * completion interrupts. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 719) if (tsk->state == TASK_RUNNING)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 720) tsk->thread.pfault_wait = -1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 721) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 722) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 723) /* signal bit not set -> a real page is missing. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 724) if (WARN_ON_ONCE(tsk != current))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 725) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 726) if (tsk->thread.pfault_wait == 1) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 727) /* Already on the list with a reference: put to sleep */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 728) goto block;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 729) } else if (tsk->thread.pfault_wait == -1) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 730) /* Completion interrupt was faster than the initial
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 731) * interrupt (pfault_wait == -1). Set pfault_wait
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 732) * back to zero and exit. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 733) tsk->thread.pfault_wait = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 734) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 735) /* Initial interrupt arrived before completion
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 736) * interrupt. Let the task sleep.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 737) * An extra task reference is needed since a different
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 738) * cpu may set the task state to TASK_RUNNING again
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 739) * before the scheduler is reached. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 740) get_task_struct(tsk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 741) tsk->thread.pfault_wait = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 742) list_add(&tsk->thread.list, &pfault_list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 743) block:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 744) /* Since this must be a userspace fault, there
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 745) * is no kernel task state to trample. Rely on the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 746) * return to userspace schedule() to block. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 747) __set_current_state(TASK_UNINTERRUPTIBLE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 748) set_tsk_need_resched(tsk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 749) set_preempt_need_resched();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 750) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 751) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 752) out:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 753) spin_unlock(&pfault_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 754) put_task_struct(tsk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 755) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 756)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 757) static int pfault_cpu_dead(unsigned int cpu)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 758) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 759) struct thread_struct *thread, *next;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 760) struct task_struct *tsk;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 761)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 762) spin_lock_irq(&pfault_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 763) list_for_each_entry_safe(thread, next, &pfault_list, list) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 764) thread->pfault_wait = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 765) list_del(&thread->list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 766) tsk = container_of(thread, struct task_struct, thread);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 767) wake_up_process(tsk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 768) put_task_struct(tsk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 769) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 770) spin_unlock_irq(&pfault_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 771) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 772) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 773)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 774) static int __init pfault_irq_init(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 775) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 776) int rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 777)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 778) rc = register_external_irq(EXT_IRQ_CP_SERVICE, pfault_interrupt);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 779) if (rc)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 780) goto out_extint;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 781) rc = pfault_init() == 0 ? 0 : -EOPNOTSUPP;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 782) if (rc)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 783) goto out_pfault;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 784) irq_subclass_register(IRQ_SUBCLASS_SERVICE_SIGNAL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 785) cpuhp_setup_state_nocalls(CPUHP_S390_PFAULT_DEAD, "s390/pfault:dead",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 786) NULL, pfault_cpu_dead);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 787) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 788)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 789) out_pfault:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 790) unregister_external_irq(EXT_IRQ_CP_SERVICE, pfault_interrupt);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 791) out_extint:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 792) pfault_disable = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 793) return rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 794) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 795) early_initcall(pfault_irq_init);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 796)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 797) #endif /* CONFIG_PFAULT */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 798)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 799) #if IS_ENABLED(CONFIG_PGSTE)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 800) void do_secure_storage_access(struct pt_regs *regs)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 801) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 802) unsigned long addr = regs->int_parm_long & __FAIL_ADDR_MASK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 803) struct vm_area_struct *vma;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 804) struct mm_struct *mm;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 805) struct page *page;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 806) int rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 807)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 808) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 809) * bit 61 tells us if the address is valid, if it's not we
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 810) * have a major problem and should stop the kernel or send a
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 811) * SIGSEGV to the process. Unfortunately bit 61 is not
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 812) * reliable without the misc UV feature so we need to check
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 813) * for that as well.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 814) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 815) if (test_bit_inv(BIT_UV_FEAT_MISC, &uv_info.uv_feature_indications) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 816) !test_bit_inv(61, ®s->int_parm_long)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 817) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 818) * When this happens, userspace did something that it
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 819) * was not supposed to do, e.g. branching into secure
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 820) * memory. Trigger a segmentation fault.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 821) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 822) if (user_mode(regs)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 823) send_sig(SIGSEGV, current, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 824) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 825) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 826)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 827) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 828) * The kernel should never run into this case and we
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 829) * have no way out of this situation.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 830) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 831) panic("Unexpected PGM 0x3d with TEID bit 61=0");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 832) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 833)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 834) switch (get_fault_type(regs)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 835) case USER_FAULT:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 836) mm = current->mm;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 837) mmap_read_lock(mm);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 838) vma = find_vma(mm, addr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 839) if (!vma) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 840) mmap_read_unlock(mm);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 841) do_fault_error(regs, VM_READ | VM_WRITE, VM_FAULT_BADMAP);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 842) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 843) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 844) page = follow_page(vma, addr, FOLL_WRITE | FOLL_GET);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 845) if (IS_ERR_OR_NULL(page)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 846) mmap_read_unlock(mm);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 847) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 848) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 849) if (arch_make_page_accessible(page))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 850) send_sig(SIGSEGV, current, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 851) put_page(page);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 852) mmap_read_unlock(mm);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 853) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 854) case KERNEL_FAULT:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 855) page = phys_to_page(addr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 856) if (unlikely(!try_get_page(page)))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 857) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 858) rc = arch_make_page_accessible(page);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 859) put_page(page);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 860) if (rc)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 861) BUG();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 862) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 863) case VDSO_FAULT:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 864) case GMAP_FAULT:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 865) default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 866) do_fault_error(regs, VM_READ | VM_WRITE, VM_FAULT_BADMAP);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 867) WARN_ON_ONCE(1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 868) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 869) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 870) NOKPROBE_SYMBOL(do_secure_storage_access);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 871)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 872) void do_non_secure_storage_access(struct pt_regs *regs)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 873) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 874) unsigned long gaddr = regs->int_parm_long & __FAIL_ADDR_MASK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 875) struct gmap *gmap = (struct gmap *)S390_lowcore.gmap;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 876)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 877) if (get_fault_type(regs) != GMAP_FAULT) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 878) do_fault_error(regs, VM_READ | VM_WRITE, VM_FAULT_BADMAP);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 879) WARN_ON_ONCE(1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 880) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 881) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 882)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 883) if (gmap_convert_to_secure(gmap, gaddr) == -EINVAL)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 884) send_sig(SIGSEGV, current, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 885) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 886) NOKPROBE_SYMBOL(do_non_secure_storage_access);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 887)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 888) void do_secure_storage_violation(struct pt_regs *regs)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 889) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 890) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 891) * Either KVM messed up the secure guest mapping or the same
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 892) * page is mapped into multiple secure guests.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 893) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 894) * This exception is only triggered when a guest 2 is running
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 895) * and can therefore never occur in kernel context.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 896) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 897) printk_ratelimited(KERN_WARNING
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 898) "Secure storage violation in task: %s, pid %d\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 899) current->comm, current->pid);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 900) send_sig(SIGSEGV, current, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 901) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 902)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 903) #else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 904) void do_secure_storage_access(struct pt_regs *regs)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 905) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 906) default_trap_handler(regs);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 907) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 908)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 909) void do_non_secure_storage_access(struct pt_regs *regs)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 910) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 911) default_trap_handler(regs);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 912) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 913)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 914) void do_secure_storage_violation(struct pt_regs *regs)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 915) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 916) default_trap_handler(regs);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 917) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 918) #endif