Orange Pi5 kernel

Deprecated Linux kernel 5.10.110 for OrangePi 5/5B/5+ boards

// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Copyright (c) 2005 Linas Vepstas <linas@linas.org>
 */

#include <linux/delay.h>
#include <linux/list.h>
#include <linux/sched.h>
#include <linux/semaphore.h>
#include <linux/pci.h>
#include <linux/slab.h>
#include <linux/kthread.h>
#include <asm/eeh_event.h>
#include <asm/ppc-pci.h>

/** Overview:
 *  EEH error states may be detected within exception handlers;
 *  however, the recovery processing needs to occur asynchronously
 *  in a normal kernel context and not an interrupt context.
 *  This pair of routines creates an event and queues it onto an
 *  event list, from which the "eehd" kernel thread drives recovery.
 */
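
/*
 * Illustrative producer-side usage (a sketch, not part of the original
 * file): a detection path that has resolved the affected PE queues
 * recovery with eeh_send_failure_event(pe), which is safe to call from
 * interrupt context; the event is then handled later by the "eehd"
 * kernel thread defined below.
 */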

static DEFINE_SPINLOCK(eeh_eventlist_lock);
static DECLARE_COMPLETION(eeh_eventlist_event);
static LIST_HEAD(eeh_eventlist);

/**
 * eeh_event_handler - Dispatch EEH events.
 * @dummy: unused
 *
 * The detection of a frozen slot can occur inside an interrupt,
 * where it can be hard to do anything about it.  The goal of this
 * routine is to pull these detection events out of the context
 * of the interrupt handler, and re-dispatch them for processing
 * at a later time in a normal context.
 */
static int eeh_event_handler(void *dummy)
{
	unsigned long flags;
	struct eeh_event *event;

	while (!kthread_should_stop()) {
		if (wait_for_completion_interruptible(&eeh_eventlist_event))
			break;

		/* Fetch EEH event from the queue */
		spin_lock_irqsave(&eeh_eventlist_lock, flags);
		event = NULL;
		if (!list_empty(&eeh_eventlist)) {
			event = list_entry(eeh_eventlist.next,
					   struct eeh_event, list);
			list_del(&event->list);
		}
		spin_unlock_irqrestore(&eeh_eventlist_lock, flags);
		if (!event)
			continue;

		/* We might have an event without a binding PE */
		if (event->pe)
			eeh_handle_normal_event(event->pe);
		else
			eeh_handle_special_event();

		kfree(event);
	}

	return 0;
}

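/*
 * Note: struct completion keeps an internal "done" count, so a
 * complete() issued by __eeh_send_failure_event() while the handler
 * is busy is not lost; the loop above consumes one wakeup per queued
 * event and therefore never sleeps while events are still pending.
 */
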
/**
 * eeh_event_init - Start kernel thread to handle EEH events
 *
 * This routine is called to start the kernel thread for processing
 * EEH events.
 */
int eeh_event_init(void)
{
	struct task_struct *t;
	int ret = 0;

	t = kthread_run(eeh_event_handler, NULL, "eehd");
	if (IS_ERR(t)) {
		ret = PTR_ERR(t);
		pr_err("%s: Failed to start EEH daemon (%d)\n",
			__func__, ret);
		return ret;
	}

	return 0;
}

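/*
 * Illustrative call site (an assumption about the surrounding EEH
 * core, not taken from this file): the daemon is typically started
 * once during EEH subsystem initialization, e.g.:
 *
 *	ret = eeh_event_init();
 *	if (ret)
 *		pr_warn("EEH: Failed to start event daemon (%d)\n", ret);
 */
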
/**
 * __eeh_send_failure_event - Generate a PCI error event
 * @pe: EEH PE
 *
 * This routine can be called within an interrupt context;
 * the actual event will be delivered in a normal context
 * (by the eehd kernel thread).
 */
int __eeh_send_failure_event(struct eeh_pe *pe)
{
	unsigned long flags;
	struct eeh_event *event;

	event = kzalloc(sizeof(*event), GFP_ATOMIC);
	if (!event) {
		pr_err("EEH: out of memory, event not handled\n");
		return -ENOMEM;
	}
	event->pe = pe;

	/*
	 * Mark the PE as recovering before inserting it in the queue.
	 * This prevents the PE from being free()ed by a hotplug driver
	 * while the PE is sitting in the event queue.
	 */
	if (pe) {
#ifdef CONFIG_STACKTRACE
		/*
		 * Save the current stack trace so we can dump it from the
		 * event handler thread.
		 */
		pe->trace_entries = stack_trace_save(pe->stack_trace,
					 ARRAY_SIZE(pe->stack_trace), 0);
#endif /* CONFIG_STACKTRACE */

		eeh_pe_state_mark(pe, EEH_PE_RECOVERING);
	}

	/* We may or may not be called in an interrupt context */
	spin_lock_irqsave(&eeh_eventlist_lock, flags);
	list_add(&event->list, &eeh_eventlist);
	spin_unlock_irqrestore(&eeh_eventlist_lock, flags);

	/* For the EEH daemon to kick in */
	complete(&eeh_eventlist_event);

	return 0;
}

int eeh_send_failure_event(struct eeh_pe *pe)
{
	/*
	 * If we've manually suppressed recovery events via debugfs
	 * then just drop it on the floor.
	 */
	if (eeh_debugfs_no_recover) {
		pr_err("EEH: Event dropped due to no_recover setting\n");
		return 0;
	}

	return __eeh_send_failure_event(pe);
}

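/*
 * Callers that need to queue an event even while recovery is
 * suppressed via debugfs (e.g. the recovery core requeueing work it
 * already owns; an assumption about code outside this file) can call
 * __eeh_send_failure_event() directly, bypassing the
 * eeh_debugfs_no_recover check above. Everything else should use this
 * wrapper.
 */
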
/**
 * eeh_remove_event - Remove EEH event from the queue
 * @pe: Event binding to the PE
 * @force: Event will be removed unconditionally
 *
 * On the PowerNV platform, subsequent events may be part of an
 * earlier one. In that case the later events are duplicates and
 * unnecessary, so they should be removed.
 */
void eeh_remove_event(struct eeh_pe *pe, bool force)
{
	unsigned long flags;
	struct eeh_event *event, *tmp;

	/*
	 * If a NULL PE is passed in, either the IOC is dead or the
	 * caller is sure it can report all existing errors itself.
	 *
	 * Unless "force" is set, events whose associated PE has been
	 * isolated are kept on the queue so that no events are lost.
	 */
	spin_lock_irqsave(&eeh_eventlist_lock, flags);
	list_for_each_entry_safe(event, tmp, &eeh_eventlist, list) {
		if (!force && event->pe &&
		    (event->pe->state & EEH_PE_ISOLATED))
			continue;

		if (!pe) {
			list_del(&event->list);
			kfree(event);
		} else if (pe->type & EEH_PE_PHB) {
			if (event->pe && event->pe->phb == pe->phb) {
				list_del(&event->list);
				kfree(event);
			}
		} else if (event->pe == pe) {
			list_del(&event->list);
			kfree(event);
		}
	}
	spin_unlock_irqrestore(&eeh_eventlist_lock, flags);
}
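
/*
 * Illustrative usage (a sketch; the real call sites live in the
 * platform EEH code, not in this file). After initiating a reset of a
 * whole PHB, duplicated events for that PHB could be flushed with
 * eeh_remove_event(phb_pe, true), where "phb_pe" is a hypothetical
 * PE of type EEH_PE_PHB; a dead IOC could drop everything queued with
 * eeh_remove_event(NULL, true).
 */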