/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Copyright (C) 2012 ARM Ltd.
 */
#ifndef __ASM_HARDIRQ_H
#define __ASM_HARDIRQ_H

#include <linux/cache.h>
#include <linux/percpu.h>
#include <linux/threads.h>
#include <asm/barrier.h>
#include <asm/irq.h>
#include <asm/kvm_arm.h>
#include <asm/sysreg.h>

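/*
 * Per-CPU softirq state, cacheline-aligned to avoid false sharing
 * between CPUs.
 */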
typedef struct {
	unsigned int __softirq_pending;
} ____cacheline_aligned irq_cpustat_t;

#include <linux/irq_cpustat.h>	/* Standard mappings for irq_cpustat_t above */

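/* Tell core code that irq_exit() is always entered with IRQs disabled. */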
#define __ARCH_IRQ_EXIT_IRQS_DISABLED	1

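/*
 * Per-CPU HCR_EL2 save area for NMI handling when the kernel runs at
 * EL2 (VHE); @cnt tracks nesting so that only the outermost NMI saves
 * and restores the register.
 */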
struct nmi_ctx {
	u64 hcr;
	unsigned int cnt;
};

DECLARE_PER_CPU(struct nmi_ctx, nmi_contexts);

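/*
 * Under VHE the host runs with HCR_EL2.TGE set, but transiently clears
 * it in a few windows (e.g. to operate on guest TLBs). An NMI taken in
 * such a window must set TGE so the host's EL2&0 translation regime is
 * in effect for the handler, then restore the saved value on exit. The
 * nesting count ensures only the outermost NMI touches the sysreg.
 *
 * Illustrative pairing (driven by the core NMI entry code):
 *
 *	arch_nmi_enter();
 *	... NMI handler ...
 *	arch_nmi_exit();
 */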
#define arch_nmi_enter()						\
do {									\
	struct nmi_ctx *___ctx;						\
	u64 ___hcr;							\
									\
	if (!is_kernel_in_hyp_mode())					\
		break;							\
									\
	___ctx = this_cpu_ptr(&nmi_contexts);				\
	if (___ctx->cnt) {						\
		___ctx->cnt++;						\
		break;							\
	}								\
									\
	___hcr = read_sysreg(hcr_el2);					\
	if (!(___hcr & HCR_TGE)) {					\
		write_sysreg(___hcr | HCR_TGE, hcr_el2);		\
		isb();							\
	}								\
	/*								\
	 * Make sure the sysreg write is performed before ___ctx->cnt	\
	 * is set to 1. NMIs that see cnt == 1 will rely on us.	\
	 */								\
	barrier();							\
	___ctx->cnt = 1;						\
	/*								\
	 * Make sure ___ctx->cnt is set before we save ___hcr. We	\
	 * don't want ___ctx->hcr to be overwritten.			\
	 */								\
	barrier();							\
	___ctx->hcr = ___hcr;						\
} while (0)

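/*
 * Undo arch_nmi_enter(): drop the nesting count and, when the outermost
 * NMI returns, restore the HCR_EL2 value that was live when it hit. No
 * restore is needed if TGE was already set on entry, since the sysreg
 * was never written in that case.
 */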
#define arch_nmi_exit()							\
do {									\
	struct nmi_ctx *___ctx;						\
	u64 ___hcr;							\
									\
	if (!is_kernel_in_hyp_mode())					\
		break;							\
									\
	___ctx = this_cpu_ptr(&nmi_contexts);				\
	___hcr = ___ctx->hcr;						\
	/*								\
	 * Make sure we read ___ctx->hcr before we release		\
	 * ___ctx->cnt as it makes ___ctx->hcr updatable again.	\
	 */								\
	barrier();							\
	___ctx->cnt--;							\
	/*								\
	 * Make sure ___ctx->cnt release is visible before we		\
	 * restore the sysreg. Otherwise a new NMI occurring		\
	 * right after write_sysreg() can be fooled and think		\
	 * we secured things for it.					\
	 */								\
	barrier();							\
	if (!___ctx->cnt && !(___hcr & HCR_TGE))			\
		write_sysreg(___hcr, hcr_el2);				\
} while (0)

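/* Unexpected/spurious interrupt: nothing to ack, just account it. */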
static inline void ack_bad_irq(unsigned int irq)
{
	extern unsigned long irq_err_count;
	irq_err_count++;
}

#endif /* __ASM_HARDIRQ_H */