Orange Pi5 kernel

Deprecated Linux kernel 5.10.110 for OrangePi 5/5B/5+ boards

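Shown below is kernel/smpboot.c from this tree: the generic SMP CPU bringup/teardown helpers and the per-CPU hotplug kthread infrastructure.
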
// SPDX-License-Identifier: GPL-2.0-only
/*
 * Common SMP CPU bringup/teardown functions
 */
#include <linux/cpu.h>
#include <linux/err.h>
#include <linux/smp.h>
#include <linux/delay.h>
#include <linux/init.h>
#include <linux/list.h>
#include <linux/slab.h>
#include <linux/sched.h>
#include <linux/sched/task.h>
#include <linux/export.h>
#include <linux/percpu.h>
#include <linux/kthread.h>
#include <linux/smpboot.h>

#include "smpboot.h"

#ifdef CONFIG_SMP

#ifdef CONFIG_GENERIC_SMP_IDLE_THREAD
/*
 * For the hotplug case we keep the task structs around and reuse
 * them.
 */
static DEFINE_PER_CPU(struct task_struct *, idle_threads);

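/*
 * Return the idle task cached for @cpu, or ERR_PTR(-ENOMEM) if no idle
 * thread has been created for it yet.
 */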
struct task_struct *idle_thread_get(unsigned int cpu)
{
	struct task_struct *tsk = per_cpu(idle_threads, cpu);

	if (!tsk)
		return ERR_PTR(-ENOMEM);
	return tsk;
}

void __init idle_thread_set_boot_cpu(void)
{
	per_cpu(idle_threads, smp_processor_id()) = current;
}

/**
 * idle_init - Initialize the idle thread for a cpu
 * @cpu:	The cpu for which the idle thread should be initialized
 *
 * Creates the thread if it does not exist.
 */
static inline void idle_init(unsigned int cpu)
{
	struct task_struct *tsk = per_cpu(idle_threads, cpu);

	if (!tsk) {
		tsk = fork_idle(cpu);
		if (IS_ERR(tsk))
			pr_err("SMP: fork_idle() failed for CPU %u\n", cpu);
		else
			per_cpu(idle_threads, cpu) = tsk;
	}
}

/**
 * idle_threads_init - Initialize idle threads for all cpus
 */
void __init idle_threads_init(void)
{
	unsigned int cpu, boot_cpu;

	boot_cpu = smp_processor_id();

	for_each_possible_cpu(cpu) {
		if (cpu != boot_cpu)
			idle_init(cpu);
	}
}
#endif

#endif /* #ifdef CONFIG_SMP */

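/*
 * List of registered smp_hotplug_thread descriptors, protected by
 * smpboot_threads_lock.
 */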
static LIST_HEAD(hotplug_threads);
static DEFINE_MUTEX(smpboot_threads_lock);

struct smpboot_thread_data {
	unsigned int			cpu;
	unsigned int			status;
	struct smp_hotplug_thread	*ht;
};

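/* Per-thread lifecycle state, tracked in smpboot_thread_data::status. */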
enum {
	HP_THREAD_NONE = 0,
	HP_THREAD_ACTIVE,
	HP_THREAD_PARKED,
};

/**
 * smpboot_thread_fn - percpu hotplug thread loop function
 * @data:	thread data pointer
 *
 * Checks for thread stop and park conditions. Calls the necessary
 * setup, cleanup, park and unpark functions for the registered
 * thread.
 *
 * Returns 0 when the thread is told to stop; it does not return otherwise.
 */
static int smpboot_thread_fn(void *data)
{
	struct smpboot_thread_data *td = data;
	struct smp_hotplug_thread *ht = td->ht;

	while (1) {
		set_current_state(TASK_INTERRUPTIBLE);
		preempt_disable();
		if (kthread_should_stop()) {
			__set_current_state(TASK_RUNNING);
			preempt_enable();
			/* cleanup must mirror setup */
			if (ht->cleanup && td->status != HP_THREAD_NONE)
				ht->cleanup(td->cpu, cpu_online(td->cpu));
			kfree(td);
			return 0;
		}

		if (kthread_should_park()) {
			__set_current_state(TASK_RUNNING);
			preempt_enable();
			if (ht->park && td->status == HP_THREAD_ACTIVE) {
				BUG_ON(td->cpu != smp_processor_id());
				ht->park(td->cpu);
				td->status = HP_THREAD_PARKED;
			}
			kthread_parkme();
			/* We might have been woken for stop */
			continue;
		}

		BUG_ON(td->cpu != smp_processor_id());

		/* Check for state change setup */
		switch (td->status) {
		case HP_THREAD_NONE:
			__set_current_state(TASK_RUNNING);
			preempt_enable();
			if (ht->setup)
				ht->setup(td->cpu);
			td->status = HP_THREAD_ACTIVE;
			continue;

		case HP_THREAD_PARKED:
			__set_current_state(TASK_RUNNING);
			preempt_enable();
			if (ht->unpark)
				ht->unpark(td->cpu);
			td->status = HP_THREAD_ACTIVE;
			continue;
		}

		if (!ht->thread_should_run(td->cpu)) {
			preempt_enable_no_resched();
			schedule();
		} else {
			__set_current_state(TASK_RUNNING);
			preempt_enable();
			ht->thread_fn(td->cpu);
		}
	}
}

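/*
 * Create the hotplug thread for @cpu if it does not exist yet, leave it
 * parked, and stash the task pointer in ht->store.
 */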
static int
__smpboot_create_thread(struct smp_hotplug_thread *ht, unsigned int cpu)
{
	struct task_struct *tsk = *per_cpu_ptr(ht->store, cpu);
	struct smpboot_thread_data *td;

	if (tsk)
		return 0;

	td = kzalloc_node(sizeof(*td), GFP_KERNEL, cpu_to_node(cpu));
	if (!td)
		return -ENOMEM;
	td->cpu = cpu;
	td->ht = ht;

	tsk = kthread_create_on_cpu(smpboot_thread_fn, td, cpu,
				    ht->thread_comm);
	if (IS_ERR(tsk)) {
		kfree(td);
		return PTR_ERR(tsk);
	}
	kthread_set_per_cpu(tsk, cpu);
	/*
	 * Park the thread so that it could start right on the CPU
	 * when it is available.
	 */
	kthread_park(tsk);
	get_task_struct(tsk);
	*per_cpu_ptr(ht->store, cpu) = tsk;
	if (ht->create) {
		/*
		 * Make sure that the task has actually scheduled out
		 * into park position, before calling the create
		 * callback. At least the migration thread callback
		 * requires that the task is off the runqueue.
		 */
		if (!wait_task_inactive(tsk, TASK_PARKED))
			WARN_ON(1);
		else
			ht->create(cpu);
	}
	return 0;
}

int smpboot_create_threads(unsigned int cpu)
{
	struct smp_hotplug_thread *cur;
	int ret = 0;

	mutex_lock(&smpboot_threads_lock);
	list_for_each_entry(cur, &hotplug_threads, list) {
		ret = __smpboot_create_thread(cur, cpu);
		if (ret)
			break;
	}
	mutex_unlock(&smpboot_threads_lock);
	return ret;
}

static void smpboot_unpark_thread(struct smp_hotplug_thread *ht, unsigned int cpu)
{
	struct task_struct *tsk = *per_cpu_ptr(ht->store, cpu);

	if (!ht->selfparking)
		kthread_unpark(tsk);
}

int smpboot_unpark_threads(unsigned int cpu)
{
	struct smp_hotplug_thread *cur;

	mutex_lock(&smpboot_threads_lock);
	list_for_each_entry(cur, &hotplug_threads, list)
		smpboot_unpark_thread(cur, cpu);
	mutex_unlock(&smpboot_threads_lock);
	return 0;
}

static void smpboot_park_thread(struct smp_hotplug_thread *ht, unsigned int cpu)
{
	struct task_struct *tsk = *per_cpu_ptr(ht->store, cpu);

	if (tsk && !ht->selfparking)
		kthread_park(tsk);
}

int smpboot_park_threads(unsigned int cpu)
{
	struct smp_hotplug_thread *cur;

	mutex_lock(&smpboot_threads_lock);
	list_for_each_entry_reverse(cur, &hotplug_threads, list)
		smpboot_park_thread(cur, cpu);
	mutex_unlock(&smpboot_threads_lock);
	return 0;
}

static void smpboot_destroy_threads(struct smp_hotplug_thread *ht)
{
	unsigned int cpu;

	/* We need to destroy also the parked threads of offline cpus */
	for_each_possible_cpu(cpu) {
		struct task_struct *tsk = *per_cpu_ptr(ht->store, cpu);

		if (tsk) {
			kthread_stop(tsk);
			put_task_struct(tsk);
			*per_cpu_ptr(ht->store, cpu) = NULL;
		}
	}
}

/**
 * smpboot_register_percpu_thread - Register a per_cpu thread related
 * 					    to hotplug
 * @plug_thread:	Hotplug thread descriptor
 *
 * Creates and starts the threads on all online cpus.
 */
int smpboot_register_percpu_thread(struct smp_hotplug_thread *plug_thread)
{
	unsigned int cpu;
	int ret = 0;

	get_online_cpus();
	mutex_lock(&smpboot_threads_lock);
	for_each_online_cpu(cpu) {
		ret = __smpboot_create_thread(plug_thread, cpu);
		if (ret) {
			smpboot_destroy_threads(plug_thread);
			goto out;
		}
		smpboot_unpark_thread(plug_thread, cpu);
	}
	list_add(&plug_thread->list, &hotplug_threads);
out:
	mutex_unlock(&smpboot_threads_lock);
	put_online_cpus();
	return ret;
}
EXPORT_SYMBOL_GPL(smpboot_register_percpu_thread);

/**
 * smpboot_unregister_percpu_thread - Unregister a per_cpu thread related to hotplug
 * @plug_thread:	Hotplug thread descriptor
 *
 * Stops all threads on all possible cpus.
 */
void smpboot_unregister_percpu_thread(struct smp_hotplug_thread *plug_thread)
{
	get_online_cpus();
	mutex_lock(&smpboot_threads_lock);
	list_del(&plug_thread->list);
	smpboot_destroy_threads(plug_thread);
	mutex_unlock(&smpboot_threads_lock);
	put_online_cpus();
}
EXPORT_SYMBOL_GPL(smpboot_unregister_percpu_thread);
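
/*
 * Example (not part of this file): a minimal sketch of how a subsystem
 * might register a per-CPU hotplug thread with the API above.  All
 * "example_*" names are hypothetical; the smpboot calls and the
 * struct smp_hotplug_thread layout come from <linux/smpboot.h>.
 */
static DEFINE_PER_CPU(struct task_struct *, example_tasks);
static DEFINE_PER_CPU(bool, example_pending);

/* Called with preemption disabled; says whether thread_fn has work to do. */
static int example_should_run(unsigned int cpu)
{
	return per_cpu(example_pending, cpu);
}

/* Runs in the per-CPU kthread whenever example_should_run() returned true. */
static void example_thread_fn(unsigned int cpu)
{
	per_cpu(example_pending, cpu) = false;
	pr_info("example smpboot thread ran on CPU %u\n", cpu);
}

static struct smp_hotplug_thread example_threads = {
	.store			= &example_tasks,
	.thread_should_run	= example_should_run,
	.thread_fn		= example_thread_fn,
	.thread_comm		= "example/%u",
};

/*
 * Would typically be wired up via an initcall.  Once registered, threads
 * are created, parked and unparked automatically as CPUs go on/offline.
 * Work is kicked by setting example_pending for a CPU and calling
 * wake_up_process(per_cpu(example_tasks, cpu)).
 */
static int __init example_smpboot_init(void)
{
	return smpboot_register_percpu_thread(&example_threads);
}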

static DEFINE_PER_CPU(atomic_t, cpu_hotplug_state) = ATOMIC_INIT(CPU_POST_DEAD);

/*
 * Called to poll specified CPU's state, for example, when waiting for
 * a CPU to come online.
 */
int cpu_report_state(int cpu)
{
	return atomic_read(&per_cpu(cpu_hotplug_state, cpu));
}

/*
 * If CPU has died properly, set its state to CPU_UP_PREPARE and
 * return success.  Otherwise, return -EBUSY if the CPU died after
 * cpu_wait_death() timed out.  And yet otherwise again, return -EAGAIN
 * if cpu_wait_death() timed out and the CPU still hasn't gotten around
 * to dying.  In the latter two cases, the CPU might not be set up
 * properly, but it is up to the arch-specific code to decide.
 * Finally, -EIO indicates an unanticipated problem.
 *
 * Note that it is permissible to omit this call entirely, as is
 * done in architectures that do no CPU-hotplug error checking.
 */
int cpu_check_up_prepare(int cpu)
{
	if (!IS_ENABLED(CONFIG_HOTPLUG_CPU)) {
		atomic_set(&per_cpu(cpu_hotplug_state, cpu), CPU_UP_PREPARE);
		return 0;
	}

	switch (atomic_read(&per_cpu(cpu_hotplug_state, cpu))) {

	case CPU_POST_DEAD:

		/* The CPU died properly, so just start it up again. */
		atomic_set(&per_cpu(cpu_hotplug_state, cpu), CPU_UP_PREPARE);
		return 0;

	case CPU_DEAD_FROZEN:

		/*
		 * Timeout during CPU death, so let caller know.
		 * The outgoing CPU completed its processing, but after
		 * cpu_wait_death() timed out and reported the error. The
		 * caller is free to proceed, in which case the state
		 * will be reset properly by cpu_set_state_online().
		 * Proceeding despite this -EBUSY return makes sense
		 * for systems where the outgoing CPUs take themselves
		 * offline, with no post-death manipulation required from
		 * a surviving CPU.
		 */
		return -EBUSY;

	case CPU_BROKEN:

		/*
		 * The most likely reason we got here is that there was
		 * a timeout during CPU death, and the outgoing CPU never
		 * did complete its processing.  This could happen on
		 * a virtualized system if the outgoing VCPU gets preempted
		 * for more than five seconds, and the user attempts to
		 * immediately online that same CPU.  Trying again later
		 * might return -EBUSY above, hence -EAGAIN.
		 */
		return -EAGAIN;

	default:

		/* Should not happen.  Famous last words. */
		return -EIO;
	}
}

/*
 * Mark the specified CPU online.
 *
 * Note that it is permissible to omit this call entirely, as is
 * done in architectures that do no CPU-hotplug error checking.
 */
void cpu_set_state_online(int cpu)
{
	(void)atomic_xchg(&per_cpu(cpu_hotplug_state, cpu), CPU_ONLINE);
}

#ifdef CONFIG_HOTPLUG_CPU

/*
 * Wait for the specified CPU to exit the idle loop and die.
 */
bool cpu_wait_death(unsigned int cpu, int seconds)
{
	int jf_left = seconds * HZ;
	int oldstate;
	bool ret = true;
	int sleep_jf = 1;

	might_sleep();

	/* The outgoing CPU will normally get done quite quickly. */
	if (atomic_read(&per_cpu(cpu_hotplug_state, cpu)) == CPU_DEAD)
		goto update_state;
	udelay(5);

	/* But if the outgoing CPU dawdles, wait increasingly long times. */
	while (atomic_read(&per_cpu(cpu_hotplug_state, cpu)) != CPU_DEAD) {
		schedule_timeout_uninterruptible(sleep_jf);
		jf_left -= sleep_jf;
		if (jf_left <= 0)
			break;
		sleep_jf = DIV_ROUND_UP(sleep_jf * 11, 10);
	}
update_state:
	oldstate = atomic_read(&per_cpu(cpu_hotplug_state, cpu));
	if (oldstate == CPU_DEAD) {
		/* Outgoing CPU died normally, update state. */
		smp_mb(); /* atomic_read() before update. */
		atomic_set(&per_cpu(cpu_hotplug_state, cpu), CPU_POST_DEAD);
	} else {
		/* Outgoing CPU still hasn't died, set state accordingly. */
		if (atomic_cmpxchg(&per_cpu(cpu_hotplug_state, cpu),
				   oldstate, CPU_BROKEN) != oldstate)
			goto update_state;
		ret = false;
	}
	return ret;
}

/*
 * Called by the outgoing CPU to report its successful death.  Return
 * false if this report follows the surviving CPU's timing out.
 *
 * A separate "CPU_DEAD_FROZEN" is used when the surviving CPU
 * timed out.  This approach allows architectures to omit calls to
 * cpu_check_up_prepare() and cpu_set_state_online() without defeating
 * the next cpu_wait_death()'s polling loop.
 */
bool cpu_report_death(void)
{
	int oldstate;
	int newstate;
	int cpu = smp_processor_id();

	do {
		oldstate = atomic_read(&per_cpu(cpu_hotplug_state, cpu));
		if (oldstate != CPU_BROKEN)
			newstate = CPU_DEAD;
		else
			newstate = CPU_DEAD_FROZEN;
	} while (atomic_cmpxchg(&per_cpu(cpu_hotplug_state, cpu),
				oldstate, newstate) != oldstate);
	return newstate == CPU_DEAD;
}

#endif /* #ifdef CONFIG_HOTPLUG_CPU */
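
/*
 * Example (not part of this file): a sketch of how architecture code
 * typically pairs cpu_wait_death() and cpu_report_death() during CPU
 * offlining.  The function names arch_cpu_die() and arch_cpu_idle_dead()
 * are illustrative only; the real hook names differ per architecture.
 * On the bringup side, cpu_check_up_prepare() is usually called before
 * restarting the CPU and cpu_set_state_online() once it is running.
 */

/* Called on a surviving CPU after the outgoing CPU has been told to stop. */
static void arch_cpu_die(unsigned int cpu)
{
	/* Give the dying CPU up to 5 seconds to report in. */
	if (!cpu_wait_death(cpu, 5))
		pr_err("CPU %u did not die\n", cpu);
}

/* Called on the dying CPU itself, from its idle path, with IRQs off. */
static void arch_cpu_idle_dead(void)
{
	(void)cpu_report_death();	/* Tell the waiting CPU we are gone. */

	/* Power the core down, or spin in a low-power loop as a fallback. */
	while (1)
		cpu_relax();
}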