// SPDX-License-Identifier: GPL-2.0
// Copyright (C) 2017 Arm Ltd.
#define pr_fmt(fmt) "sdei: " fmt

#include <linux/arm-smccc.h>
#include <linux/arm_sdei.h>
#include <linux/hardirq.h>
#include <linux/irqflags.h>
#include <linux/sched/task_stack.h>
#include <linux/scs.h>
#include <linux/uaccess.h>

#include <asm/alternative.h>
#include <asm/exception.h>
#include <asm/kprobes.h>
#include <asm/mmu.h>
#include <asm/ptrace.h>
#include <asm/sections.h>
#include <asm/stacktrace.h>
#include <asm/sysreg.h>
#include <asm/vmap_stack.h>

unsigned long sdei_exit_mode;

/*
 * With VMAP'd stacks, the overflow check on exception entry uses sp as a
 * scratch register, meaning SDEI has to switch to its own stack. We need two
 * stacks as a critical event may interrupt a normal event that has just
 * taken a synchronous exception and is still using sp as a scratch register.
 * For a critical event interrupting a normal event, we can't reliably tell
 * if we were on the sdei stack.
 * For now, we allocate stacks when the driver is probed.
 */
DECLARE_PER_CPU(unsigned long *, sdei_stack_normal_ptr);
DECLARE_PER_CPU(unsigned long *, sdei_stack_critical_ptr);

#ifdef CONFIG_VMAP_STACK
DEFINE_PER_CPU(unsigned long *, sdei_stack_normal_ptr);
DEFINE_PER_CPU(unsigned long *, sdei_stack_critical_ptr);
#endif

DECLARE_PER_CPU(unsigned long *, sdei_shadow_call_stack_normal_ptr);
DECLARE_PER_CPU(unsigned long *, sdei_shadow_call_stack_critical_ptr);

#ifdef CONFIG_SHADOW_CALL_STACK
DEFINE_PER_CPU(unsigned long *, sdei_shadow_call_stack_normal_ptr);
DEFINE_PER_CPU(unsigned long *, sdei_shadow_call_stack_critical_ptr);
#endif

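/*
 * Free one CPU's VMAP'd SDEI stack. The per-cpu pointer is cleared before
 * the stack is freed, so a stale pointer to freed memory is never visible.
 */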
static void _free_sdei_stack(unsigned long * __percpu *ptr, int cpu)
{
	unsigned long *p;

	p = per_cpu(*ptr, cpu);
	if (p) {
		per_cpu(*ptr, cpu) = NULL;
		vfree(p);
	}
}

static void free_sdei_stacks(void)
{
	int cpu;

	if (!IS_ENABLED(CONFIG_VMAP_STACK))
		return;

	for_each_possible_cpu(cpu) {
		_free_sdei_stack(&sdei_stack_normal_ptr, cpu);
		_free_sdei_stack(&sdei_stack_critical_ptr, cpu);
	}
}

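/*
 * Allocate one CPU's SDEI stack from the vmalloc area, on that CPU's local
 * NUMA node.
 */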
static int _init_sdei_stack(unsigned long * __percpu *ptr, int cpu)
{
	unsigned long *p;

	p = arch_alloc_vmap_stack(SDEI_STACK_SIZE, cpu_to_node(cpu));
	if (!p)
		return -ENOMEM;
	per_cpu(*ptr, cpu) = p;

	return 0;
}

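/*
 * Allocate the normal and critical SDEI stacks for every possible CPU.
 * On failure, everything allocated so far is freed: a partial set of
 * per-CPU stacks couldn't be used safely.
 */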
static int init_sdei_stacks(void)
{
	int cpu;
	int err = 0;

	if (!IS_ENABLED(CONFIG_VMAP_STACK))
		return 0;

	for_each_possible_cpu(cpu) {
		err = _init_sdei_stack(&sdei_stack_normal_ptr, cpu);
		if (err)
			break;
		err = _init_sdei_stack(&sdei_stack_critical_ptr, cpu);
		if (err)
			break;
	}

	if (err)
		free_sdei_stacks();

	return err;
}

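/*
 * The shadow call stack helpers mirror the stack helpers above, but use the
 * generic scs_alloc()/scs_free() API instead of the vmap stack allocator.
 */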
static void _free_sdei_scs(unsigned long * __percpu *ptr, int cpu)
{
	void *s;

	s = per_cpu(*ptr, cpu);
	if (s) {
		per_cpu(*ptr, cpu) = NULL;
		scs_free(s);
	}
}

static void free_sdei_scs(void)
{
	int cpu;

	for_each_possible_cpu(cpu) {
		_free_sdei_scs(&sdei_shadow_call_stack_normal_ptr, cpu);
		_free_sdei_scs(&sdei_shadow_call_stack_critical_ptr, cpu);
	}
}

static int _init_sdei_scs(unsigned long * __percpu *ptr, int cpu)
{
	void *s;

	s = scs_alloc(cpu_to_node(cpu));
	if (!s)
		return -ENOMEM;
	per_cpu(*ptr, cpu) = s;

	return 0;
}

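/*
 * Allocate normal and critical shadow call stacks for every possible CPU,
 * rolling back on failure just like init_sdei_stacks().
 */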
static int init_sdei_scs(void)
{
	int cpu;
	int err = 0;

	if (!IS_ENABLED(CONFIG_SHADOW_CALL_STACK))
		return 0;

	for_each_possible_cpu(cpu) {
		err = _init_sdei_scs(&sdei_shadow_call_stack_normal_ptr, cpu);
		if (err)
			break;
		err = _init_sdei_scs(&sdei_shadow_call_stack_critical_ptr, cpu);
		if (err)
			break;
	}

	if (err)
		free_sdei_scs();

	return err;
}

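/*
 * Report whether sp lies within this CPU's normal/critical SDEI stack,
 * filling in info with the stack's bounds and type for the unwinder.
 */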
static bool on_sdei_normal_stack(unsigned long sp, struct stack_info *info)
{
	unsigned long low = (unsigned long)raw_cpu_read(sdei_stack_normal_ptr);
	unsigned long high = low + SDEI_STACK_SIZE;

	return on_stack(sp, low, high, STACK_TYPE_SDEI_NORMAL, info);
}

static bool on_sdei_critical_stack(unsigned long sp, struct stack_info *info)
{
	unsigned long low = (unsigned long)raw_cpu_read(sdei_stack_critical_ptr);
	unsigned long high = low + SDEI_STACK_SIZE;

	return on_stack(sp, low, high, STACK_TYPE_SDEI_CRITICAL, info);
}

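/*
 * Used by the unwinder to check whether sp is on one of this CPU's SDEI
 * stacks. Without CONFIG_VMAP_STACK the SDEI stacks are never allocated,
 * so sp can never be on them.
 */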
bool _on_sdei_stack(unsigned long sp, struct stack_info *info)
{
	if (!IS_ENABLED(CONFIG_VMAP_STACK))
		return false;

	if (on_sdei_critical_stack(sp, info))
		return true;

	if (on_sdei_normal_stack(sp, info))
		return true;

	return false;
}

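/*
 * Called by the SDEI firmware driver at probe time to find the address that
 * firmware should branch to when delivering an event; returns 0 if SDEI
 * can't be supported in this configuration. With KPTI
 * (CONFIG_UNMAP_KERNEL_AT_EL0) the kernel may be unmapped when an event is
 * taken, so entry must go via the trampoline text, which is always mapped
 * at TRAMP_VALIAS.
 */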
unsigned long sdei_arch_get_entry_point(int conduit)
{
	/*
	 * SDEI works between adjacent exception levels. If we booted at EL1 we
	 * assume a hypervisor is marshalling events. If we booted at EL2 and
	 * dropped to EL1 because we don't support VHE, then we can't support
	 * SDEI.
	 */
	if (is_hyp_mode_available() && !is_kernel_in_hyp_mode()) {
		pr_err("Not supported on this hardware/boot configuration\n");
		goto out_err;
	}

	if (init_sdei_stacks())
		goto out_err;

	if (init_sdei_scs())
		goto out_err_free_stacks;

	sdei_exit_mode = (conduit == SMCCC_CONDUIT_HVC) ? SDEI_EXIT_HVC : SDEI_EXIT_SMC;

#ifdef CONFIG_UNMAP_KERNEL_AT_EL0
	if (arm64_kernel_unmapped_at_el0()) {
		unsigned long offset;

		offset = (unsigned long)__sdei_asm_entry_trampoline -
			 (unsigned long)__entry_tramp_text_start;
		return TRAMP_VALIAS + offset;
	} else
#endif /* CONFIG_UNMAP_KERNEL_AT_EL0 */
		return (unsigned long)__sdei_asm_handler;

out_err_free_stacks:
	free_sdei_stacks();
out_err:
	return 0;
}

/*
 * __sdei_handler() returns one of:
 * SDEI_EV_HANDLED - success, return to the interrupted context.
 * SDEI_EV_FAILED - failure, return this error code to firmware.
 * virtual-address - success, return to this address.
 */
static __kprobes unsigned long _sdei_handler(struct pt_regs *regs,
					     struct sdei_registered_event *arg)
{
	u32 mode;
	int i, err = 0;
	int clobbered_registers = 4;
	u64 elr = read_sysreg(elr_el1);
	u32 kernel_mode = read_sysreg(CurrentEL) | 1;	/* +SPSel */
	unsigned long vbar = read_sysreg(vbar_el1);

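	/*
	 * Firmware used the interrupted context's x0-x3 to pass the event
	 * parameters, and the KPTI entry trampoline clobbers x4 as well, so
	 * their original values must be read back from firmware below.
	 */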
	if (arm64_kernel_unmapped_at_el0())
		clobbered_registers++;

	/* Retrieve the missing register values */
	for (i = 0; i < clobbered_registers; i++) {
		/* from within the handler, this call always succeeds */
		sdei_api_event_context(i, &regs->regs[i]);
	}

	/*
	 * We didn't take an exception to get here, so set PAN. UAO will be
	 * cleared by sdei_event_handler()'s force_uaccess_begin() call.
	 */
	__uaccess_enable_hw_pan();

	err = sdei_event_handler(regs, arg);
	if (err)
		return SDEI_EV_FAILED;

	if (elr != read_sysreg(elr_el1)) {
		/*
		 * We took a synchronous exception from the SDEI handler.
		 * This could deadlock, and if you interrupt KVM it will
		 * hyp-panic instead.
		 */
		pr_warn("unsafe: exception during handler\n");
	}

	mode = regs->pstate & (PSR_MODE32_BIT | PSR_MODE_MASK);

	/*
	 * If we interrupted the kernel with interrupts masked, we always go
	 * back to wherever we came from.
	 */
	if (mode == kernel_mode && !interrupts_enabled(regs))
		return SDEI_EV_HANDLED;

	/*
	 * Otherwise, we pretend this was an IRQ. This lets user space tasks
	 * receive signals before we return to them, and KVM to invoke its
	 * world switch to do the same.
	 *
	 * See DDI0487B.a Table D1-7 'Vector offsets from vector table base
	 * address'.
	 */
	if (mode == kernel_mode)
		return vbar + 0x280;
	else if (mode & PSR_MODE32_BIT)
		return vbar + 0x680;

	return vbar + 0x480;
}

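/*
 * Called from the SDEI assembly entry code with the interrupted context in
 * regs. SDEI events are NMI-like: they can fire with IRQs masked, so the
 * handler is bracketed by the NMI entry/exit accounting that RCU and
 * lockdep require.
 */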
asmlinkage noinstr unsigned long
__sdei_handler(struct pt_regs *regs, struct sdei_registered_event *arg)
{
	unsigned long ret;

	arm64_enter_nmi(regs);

	ret = _sdei_handler(regs, arg);

	arm64_exit_nmi(regs);

	return ret;
}