// SPDX-License-Identifier: GPL-2.0
/*
 * Detect hard lockups on a system
 *
 * started by Don Zickus, Copyright (C) 2010 Red Hat, Inc.
 *
 * Note: Most of this code is borrowed heavily from the original softlockup
 * detector, so thanks to Ingo for the initial implementation.
 * Some chunks also taken from the old x86-specific nmi watchdog code, thanks
 * to those contributors as well.
 */

#define pr_fmt(fmt) "NMI watchdog: " fmt

#include <linux/nmi.h>
#include <linux/atomic.h>
#include <linux/module.h>
#include <linux/sched/debug.h>

#include <asm/irq_regs.h>
#include <linux/perf_event.h>

static DEFINE_PER_CPU(bool, hard_watchdog_warn);
static DEFINE_PER_CPU(bool, watchdog_nmi_touch);
static DEFINE_PER_CPU(struct perf_event *, watchdog_ev);
static DEFINE_PER_CPU(struct perf_event *, dead_event);
static struct cpumask dead_events_mask;

static unsigned long hardlockup_allcpu_dumped;
static atomic_t watchdog_cpus = ATOMIC_INIT(0);

notrace void arch_touch_nmi_watchdog(void)
{
	/*
	 * Using __raw here because some code paths have
	 * preemption enabled. If preemption is enabled
	 * then interrupts should be enabled too, in which
	 * case we shouldn't have to worry about the watchdog
	 * going off.
	 */
	raw_cpu_write(watchdog_nmi_touch, true);
}
EXPORT_SYMBOL(arch_touch_nmi_watchdog);
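
/*
 * Usage sketch (illustrative, not a definition from this file): a code
 * path that legitimately keeps a CPU busy with interrupts disabled for
 * a long time can suppress the next hard lockup check, e.g.:
 *
 *	local_irq_save(flags);
 *	... long-running section with IRQs off ...
 *	arch_touch_nmi_watchdog();
 *	local_irq_restore(flags);
 *
 * Most callers should prefer touch_nmi_watchdog() from <linux/nmi.h>,
 * which additionally touches the soft lockup detector.
 */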

#ifdef CONFIG_HARDLOCKUP_CHECK_TIMESTAMP
static DEFINE_PER_CPU(ktime_t, last_timestamp);
static DEFINE_PER_CPU(unsigned int, nmi_rearmed);
static ktime_t watchdog_hrtimer_sample_threshold __read_mostly;

void watchdog_update_hrtimer_threshold(u64 period)
{
	/*
	 * The hrtimer runs with a period of (watchdog_threshold * 2) / 5
	 *
	 * So it runs effectively with 2.5 times the rate of the NMI
	 * watchdog. That means the hrtimer should fire 2-3 times before
	 * the NMI watchdog expires. The NMI watchdog on x86 is based on
	 * unhalted CPU cycles, so if Turbo-Mode is enabled the CPU cycles
	 * might run way faster than expected and the NMI fires in a
	 * smaller period than the one deduced from the nominal CPU
	 * frequency. Depending on the Turbo-Mode factor this might be fast
	 * enough to get the NMI period smaller than the hrtimer watchdog
	 * period and trigger false positives.
	 *
	 * The sample threshold is used to check in the NMI handler whether
	 * the minimum time between two NMI samples has elapsed. That
	 * prevents false positives.
	 *
	 * Set this to 4/5 of the actual watchdog threshold period so the
	 * hrtimer is guaranteed to fire at least once within the real
	 * watchdog threshold.
	 */
	watchdog_hrtimer_sample_threshold = period * 2;
}
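
/*
 * Worked example, assuming the default watchdog_thresh of 10s: the
 * caller passes period = 10s * 2/5 = 4s, so the hrtimer fires every 4s
 * and watchdog_hrtimer_sample_threshold becomes 2 * 4s = 8s, i.e. 4/5
 * of the 10s hardlockup threshold. NMI samples arriving less than 8s
 * apart are then treated as suspect by watchdog_check_timestamp()
 * below.
 */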

static bool watchdog_check_timestamp(void)
{
	ktime_t delta, now = ktime_get_mono_fast_ns();

	delta = now - __this_cpu_read(last_timestamp);
	if (delta < watchdog_hrtimer_sample_threshold) {
		/*
		 * If ktime is jiffies based, a stalled timer would prevent
		 * jiffies from being incremented and the filter would look
		 * at a stale timestamp and never trigger.
		 */
		if (__this_cpu_inc_return(nmi_rearmed) < 10)
			return false;
	}
	__this_cpu_write(nmi_rearmed, 0);
	__this_cpu_write(last_timestamp, now);
	return true;
}
#else
static inline bool watchdog_check_timestamp(void)
{
	return true;
}
#endif
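
/*
 * Illustrative timeline for the filter above (continuing the 8s
 * threshold example): if Turbo-Mode makes the cycle counter overflow
 * after only ~6s, up to nine consecutive early NMIs are ignored as
 * probable false positives; the tenth is let through via nmi_rearmed,
 * so a stalled hrtimer (e.g. with jiffies-based ktime) cannot hide a
 * real lockup indefinitely.
 */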

static struct perf_event_attr wd_hw_attr = {
	.type		= PERF_TYPE_HARDWARE,
	.config		= PERF_COUNT_HW_CPU_CYCLES,
	.size		= sizeof(struct perf_event_attr),
	.pinned		= 1,
	.disabled	= 1,
};
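
/*
 * The sample period is filled in later from hw_nmi_get_sample_period().
 * Rough numbers (assuming x86 with a nominal 2 GHz CPU and
 * watchdog_thresh = 10): the period is on the order of
 * 2 * 10^9 cycles/s * 10s = 2 * 10^10 unhalted cycles, so the counter
 * overflows and delivers an NMI about once per watchdog_thresh seconds
 * at nominal frequency, and sooner under Turbo-Mode.
 */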

/* Callback function for perf event subsystem */
static void watchdog_overflow_callback(struct perf_event *event,
				       struct perf_sample_data *data,
				       struct pt_regs *regs)
{
	/* Ensure the watchdog never gets throttled */
	event->hw.interrupts = 0;

	if (__this_cpu_read(watchdog_nmi_touch)) {
		__this_cpu_write(watchdog_nmi_touch, false);
		return;
	}

	if (!watchdog_check_timestamp())
		return;

	/*
	 * Check for a hardlockup by verifying that this CPU's timer
	 * interrupt is still incrementing. The timer interrupt should
	 * have fired multiple times before this perf event overflowed;
	 * if it hasn't, that is a good indication the CPU is stuck.
	 */
	if (is_hardlockup()) {
		int this_cpu = smp_processor_id();

		/* Only print hardlockups once */
		if (__this_cpu_read(hard_watchdog_warn))
			return;

		pr_emerg("Watchdog detected hard LOCKUP on cpu %d\n",
			 this_cpu);
		print_modules();
		print_irqtrace_events(current);
		if (regs)
			show_regs(regs);
		else
			dump_stack();

		/*
		 * Perform the all-CPU dump only once to avoid multiple
		 * hardlockups generating interleaved traces.
		 */
		if (sysctl_hardlockup_all_cpu_backtrace &&
		    !test_and_set_bit(0, &hardlockup_allcpu_dumped))
			trigger_allbutself_cpu_backtrace();

		if (hardlockup_panic)
			nmi_panic(regs, "Hard LOCKUP");

		__this_cpu_write(hard_watchdog_warn, true);
		return;
	}

	__this_cpu_write(hard_watchdog_warn, false);
}

static int hardlockup_detector_event_create(void)
{
	unsigned int cpu = smp_processor_id();
	struct perf_event_attr *wd_attr;
	struct perf_event *evt;

	wd_attr = &wd_hw_attr;
	wd_attr->sample_period = hw_nmi_get_sample_period(watchdog_thresh);

	/* Try to register using hardware perf events */
	evt = perf_event_create_kernel_counter(wd_attr, cpu, NULL,
					       watchdog_overflow_callback, NULL);
	if (IS_ERR(evt)) {
		pr_debug("Perf event create on CPU %d failed with %ld\n", cpu,
			 PTR_ERR(evt));
		return PTR_ERR(evt);
	}
	this_cpu_write(watchdog_ev, evt);
	return 0;
}
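
/*
 * Note: the event is created with .disabled = 1 (see wd_hw_attr), so
 * it does not start counting until hardlockup_detector_perf_enable()
 * calls perf_event_enable() on it.
 */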

/**
 * hardlockup_detector_perf_enable - Enable the local event
 */
void hardlockup_detector_perf_enable(void)
{
	if (hardlockup_detector_event_create())
		return;

	/* use original value for check */
	if (!atomic_fetch_inc(&watchdog_cpus))
		pr_info("Enabled. Permanently consumes one hw-PMU counter.\n");

	perf_event_enable(this_cpu_read(watchdog_ev));
}

/**
 * hardlockup_detector_perf_disable - Disable the local event
 */
void hardlockup_detector_perf_disable(void)
{
	struct perf_event *event = this_cpu_read(watchdog_ev);

	if (event) {
		perf_event_disable(event);
		this_cpu_write(watchdog_ev, NULL);
		this_cpu_write(dead_event, event);
		cpumask_set_cpu(smp_processor_id(), &dead_events_mask);
		atomic_dec(&watchdog_cpus);
	}
}
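
/*
 * Enable/disable pairing, sketched (hypothetical caller flow; the
 * actual callers live in the core watchdog code, not in this file):
 * the local event is enabled when a CPU's watchdog starts and disabled
 * when it is torn down, e.g.:
 *
 *	watchdog_enable(cpu):  hardlockup_detector_perf_enable();
 *	watchdog_disable(cpu): hardlockup_detector_perf_disable();
 *
 * The event cannot be released from that context, so it is parked in
 * 'dead_event' until hardlockup_detector_perf_cleanup() runs.
 */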

/**
 * hardlockup_detector_perf_cleanup - Cleanup disabled events and destroy them
 *
 * Called from lockup_detector_cleanup(). Serialized by the caller.
 */
void hardlockup_detector_perf_cleanup(void)
{
	int cpu;

	for_each_cpu(cpu, &dead_events_mask) {
		struct perf_event *event = per_cpu(dead_event, cpu);

		/*
		 * Required because for_each_cpu() reports unconditionally
		 * CPU0 as set on UP kernels. Sigh.
		 */
		if (event)
			perf_event_release_kernel(event);
		per_cpu(dead_event, cpu) = NULL;
	}
	cpumask_clear(&dead_events_mask);
}

/**
 * hardlockup_detector_perf_stop - Globally stop watchdog events
 *
 * Special interface for x86 to handle the perf HT bug.
 */
void __init hardlockup_detector_perf_stop(void)
{
	int cpu;

	lockdep_assert_cpus_held();

	for_each_online_cpu(cpu) {
		struct perf_event *event = per_cpu(watchdog_ev, cpu);

		if (event)
			perf_event_disable(event);
	}
}

/**
 * hardlockup_detector_perf_restart - Globally restart watchdog events
 *
 * Special interface for x86 to handle the perf HT bug.
 */
void __init hardlockup_detector_perf_restart(void)
{
	int cpu;

	lockdep_assert_cpus_held();

	if (!(watchdog_enabled & NMI_WATCHDOG_ENABLED))
		return;

	for_each_online_cpu(cpu) {
		struct perf_event *event = per_cpu(watchdog_ev, cpu);

		if (event)
			perf_event_enable(event);
	}
}
/**
 * hardlockup_detector_perf_init - Probe whether NMI event is available at all
 */
int __init hardlockup_detector_perf_init(void)
{
	int ret = hardlockup_detector_event_create();

	if (ret) {
		pr_info("Perf NMI watchdog permanently disabled\n");
	} else {
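		/* The event was created only as a probe; release it again. */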
		perf_event_release_kernel(this_cpu_read(watchdog_ev));
		this_cpu_write(watchdog_ev, NULL);
	}
	return ret;
}