// SPDX-License-Identifier: GPL-2.0
/*
 *  SMP related functions
 *
 *    Copyright IBM Corp. 1999, 2012
 *    Author(s): Denis Joseph Barrow,
 *		 Martin Schwidefsky <schwidefsky@de.ibm.com>,
 *		 Heiko Carstens <heiko.carstens@de.ibm.com>,
 *
 *  based on other smp stuff by
 *    (c) 1995 Alan Cox, CymruNET Ltd  <alan@cymru.net>
 *    (c) 1998 Ingo Molnar
 *
 * The code outside of smp.c uses logical cpu numbers, only smp.c does
 * the translation of logical to physical cpu ids. All new code that
 * operates on physical cpu numbers needs to go into smp.c.
 */

#define KMSG_COMPONENT "cpu"
#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt

#include <linux/workqueue.h>
#include <linux/memblock.h>
#include <linux/export.h>
#include <linux/init.h>
#include <linux/mm.h>
#include <linux/err.h>
#include <linux/spinlock.h>
#include <linux/kernel_stat.h>
#include <linux/delay.h>
#include <linux/interrupt.h>
#include <linux/irqflags.h>
#include <linux/cpu.h>
#include <linux/slab.h>
#include <linux/sched/hotplug.h>
#include <linux/sched/task_stack.h>
#include <linux/crash_dump.h>
#include <linux/kprobes.h>
#include <asm/asm-offsets.h>
#include <asm/diag.h>
#include <asm/switch_to.h>
#include <asm/facility.h>
#include <asm/ipl.h>
#include <asm/setup.h>
#include <asm/irq.h>
#include <asm/tlbflush.h>
#include <asm/vtimer.h>
#include <asm/lowcore.h>
#include <asm/sclp.h>
#include <asm/vdso.h>
#include <asm/debug.h>
#include <asm/os_info.h>
#include <asm/sigp.h>
#include <asm/idle.h>
#include <asm/nmi.h>
#include <asm/stacktrace.h>
#include <asm/topology.h>
#include "entry.h"

enum {
	ec_schedule = 0,
	ec_call_function_single,
	ec_stop_cpu,
	ec_mcck_pending,
};

enum {
	CPU_STATE_STANDBY,
	CPU_STATE_CONFIGURED,
};

static DEFINE_PER_CPU(struct cpu *, cpu_device);

struct pcpu {
	struct lowcore *lowcore;	/* lowcore page(s) for the cpu */
	unsigned long ec_mask;		/* bit mask for ec_xxx functions */
	unsigned long ec_clk;		/* sigp timestamp for ec_xxx */
	signed char state;		/* physical cpu state */
	signed char polarization;	/* physical polarization */
	u16 address;			/* physical cpu address */
};

static u8 boot_core_type;
static struct pcpu pcpu_devices[NR_CPUS];

unsigned int smp_cpu_mt_shift;
EXPORT_SYMBOL(smp_cpu_mt_shift);

unsigned int smp_cpu_mtid;
EXPORT_SYMBOL(smp_cpu_mtid);

#ifdef CONFIG_CRASH_DUMP
__vector128 __initdata boot_cpu_vector_save_area[__NUM_VXRS];
#endif

static unsigned int smp_max_threads __initdata = -1U;

static int __init early_nosmt(char *s)
{
	smp_max_threads = 1;
	return 0;
}
early_param("nosmt", early_nosmt);

static int __init early_smt(char *s)
{
	get_option(&s, &smp_max_threads);
	return 0;
}
early_param("smt", early_smt);

/*
 * The smp_cpu_state_mutex must be held when changing the state or
 * polarization member of a pcpu data structure within the pcpu_devices
 * array.
 */
DEFINE_MUTEX(smp_cpu_state_mutex);

/*
 * Signal processor helper functions.
 */
static inline int __pcpu_sigp_relax(u16 addr, u8 order, unsigned long parm)
{
	int cc;

	while (1) {
		cc = __pcpu_sigp(addr, order, parm, NULL);
		if (cc != SIGP_CC_BUSY)
			return cc;
		cpu_relax();
	}
}

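/*
 * Retry a sigp order until the target cpu no longer reports busy.
 * Starting with the fourth attempt a small delay is inserted between
 * the retries.
 */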
static int pcpu_sigp_retry(struct pcpu *pcpu, u8 order, u32 parm)
{
	int cc, retry;

	for (retry = 0; ; retry++) {
		cc = __pcpu_sigp(pcpu->address, order, parm, NULL);
		if (cc != SIGP_CC_BUSY)
			break;
		if (retry >= 3)
			udelay(10);
	}
	return cc;
}

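/*
 * Returns 1 if the cpu is in stopped or check-stop state, 0 otherwise
 * (including when the sigp sense order does not store a status).
 */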
static inline int pcpu_stopped(struct pcpu *pcpu)
{
	u32 status;

	if (__pcpu_sigp(pcpu->address, SIGP_SENSE,
			0, &status) != SIGP_CC_STATUS_STORED)
		return 0;
	return !!(status & (SIGP_STATUS_CHECK_STOP|SIGP_STATUS_STOPPED));
}

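/*
 * Use the sigp sense running order to check whether the target cpu is
 * currently running, i.e. dispatched on a physical cpu.
 */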
static inline int pcpu_running(struct pcpu *pcpu)
{
	if (__pcpu_sigp(pcpu->address, SIGP_SENSE_RUNNING,
			0, NULL) != SIGP_CC_STATUS_STORED)
		return 1;
	/* Status stored condition code is equivalent to cpu not running. */
	return 0;
}

/*
 * Find struct pcpu by cpu address.
 */
static struct pcpu *pcpu_find_address(const struct cpumask *mask, u16 address)
{
	int cpu;

	for_each_cpu(cpu, mask)
		if (pcpu_devices[cpu].address == address)
			return pcpu_devices + cpu;
	return NULL;
}

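/*
 * Signal an event-call request to another cpu. The request bit is
 * recorded in ec_mask; if it was not already pending, the cpu is
 * notified via external call if it is running, or via emergency
 * signal if it is not.
 */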
static void pcpu_ec_call(struct pcpu *pcpu, int ec_bit)
{
	int order;

	if (test_and_set_bit(ec_bit, &pcpu->ec_mask))
		return;
	order = pcpu_running(pcpu) ? SIGP_EXTERNAL_CALL : SIGP_EMERGENCY_SIGNAL;
	pcpu->ec_clk = get_tod_clock_fast();
	pcpu_sigp_retry(pcpu, order, 0);
}

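/*
 * Allocate and initialize the lowcore, the nodat and async stacks and
 * the machine check and vdso per-cpu areas for a cpu that is about to
 * be brought up. The first 512 bytes of the lowcore are inherited from
 * the boot cpu; the boot cpu's lowcore and nodat stack already exist
 * and are reused.
 */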
static int pcpu_alloc_lowcore(struct pcpu *pcpu, int cpu)
{
	unsigned long async_stack, nodat_stack;
	struct lowcore *lc;

	if (pcpu != &pcpu_devices[0]) {
		pcpu->lowcore = (struct lowcore *)
			__get_free_pages(GFP_KERNEL | GFP_DMA, LC_ORDER);
		nodat_stack = __get_free_pages(GFP_KERNEL, THREAD_SIZE_ORDER);
		if (!pcpu->lowcore || !nodat_stack)
			goto out;
	} else {
		nodat_stack = pcpu->lowcore->nodat_stack - STACK_INIT_OFFSET;
	}
	async_stack = stack_alloc();
	if (!async_stack)
		goto out;
	lc = pcpu->lowcore;
	memcpy(lc, &S390_lowcore, 512);
	memset((char *) lc + 512, 0, sizeof(*lc) - 512);
	lc->async_stack = async_stack + STACK_INIT_OFFSET;
	lc->nodat_stack = nodat_stack + STACK_INIT_OFFSET;
	lc->cpu_nr = cpu;
	lc->spinlock_lockval = arch_spin_lockval(cpu);
	lc->spinlock_index = 0;
	lc->br_r1_trampoline = 0x07f1;	/* br %r1 */
	lc->return_lpswe = gen_lpswe(__LC_RETURN_PSW);
	lc->return_mcck_lpswe = gen_lpswe(__LC_RETURN_MCCK_PSW);
	lc->preempt_count = PREEMPT_DISABLED;
	if (nmi_alloc_per_cpu(lc))
		goto out_async;
	if (vdso_alloc_per_cpu(lc))
		goto out_mcesa;
	lowcore_ptr[cpu] = lc;
	pcpu_sigp_retry(pcpu, SIGP_SET_PREFIX, (u32)(unsigned long) lc);
	return 0;

out_mcesa:
	nmi_free_per_cpu(lc);
out_async:
	stack_free(async_stack);
out:
	if (pcpu != &pcpu_devices[0]) {
		free_pages(nodat_stack, THREAD_SIZE_ORDER);
		free_pages((unsigned long) pcpu->lowcore, LC_ORDER);
	}
	return -ENOMEM;
}

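/*
 * Undo pcpu_alloc_lowcore: detach the prefix area and release the
 * stacks and per-cpu areas. The lowcore and nodat stack of the boot
 * cpu are never freed.
 */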
static void pcpu_free_lowcore(struct pcpu *pcpu)
{
	unsigned long async_stack, nodat_stack, lowcore;

	nodat_stack = pcpu->lowcore->nodat_stack - STACK_INIT_OFFSET;
	async_stack = pcpu->lowcore->async_stack - STACK_INIT_OFFSET;
	lowcore = (unsigned long) pcpu->lowcore;

	pcpu_sigp_retry(pcpu, SIGP_SET_PREFIX, 0);
	lowcore_ptr[pcpu - pcpu_devices] = NULL;
	vdso_free_per_cpu(pcpu->lowcore);
	nmi_free_per_cpu(pcpu->lowcore);
	stack_free(async_stack);
	if (pcpu == &pcpu_devices[0])
		return;
	free_pages(nodat_stack, THREAD_SIZE_ORDER);
	free_pages(lowcore, LC_ORDER);
}

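/*
 * Set up the lowcore of a secondary cpu before it is started: the
 * per-cpu offset, the address space controls, the cpu timers and the
 * facility lists are taken over from the boot cpu.
 */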
static void pcpu_prepare_secondary(struct pcpu *pcpu, int cpu)
{
	struct lowcore *lc = pcpu->lowcore;

	cpumask_set_cpu(cpu, &init_mm.context.cpu_attach_mask);
	cpumask_set_cpu(cpu, mm_cpumask(&init_mm));
	lc->cpu_nr = cpu;
	lc->spinlock_lockval = arch_spin_lockval(cpu);
	lc->spinlock_index = 0;
	lc->percpu_offset = __per_cpu_offset[cpu];
	lc->kernel_asce = S390_lowcore.kernel_asce;
	lc->user_asce = S390_lowcore.kernel_asce;
	lc->machine_flags = S390_lowcore.machine_flags;
	lc->user_timer = lc->system_timer =
		lc->steal_timer = lc->avg_steal_timer = 0;
	__ctl_store(lc->cregs_save_area, 0, 15);
	lc->cregs_save_area[1] = lc->kernel_asce;
	lc->cregs_save_area[7] = lc->vdso_asce;
	save_access_regs((unsigned int *) lc->access_regs_save_area);
	memcpy(lc->stfle_fac_list, S390_lowcore.stfle_fac_list,
	       sizeof(lc->stfle_fac_list));
	memcpy(lc->alt_stfle_fac_list, S390_lowcore.alt_stfle_fac_list,
	       sizeof(lc->alt_stfle_fac_list));
	arch_spin_lock_setup(cpu);
}

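/*
 * Attach a task to a cpu: publish the task's kernel stack, pid and
 * accumulated cpu timer values in the cpu's lowcore, so the cpu can
 * start executing on behalf of that task.
 */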
static void pcpu_attach_task(struct pcpu *pcpu, struct task_struct *tsk)
{
	struct lowcore *lc = pcpu->lowcore;

	lc->kernel_stack = (unsigned long) task_stack_page(tsk)
		+ THREAD_SIZE - STACK_FRAME_OVERHEAD - sizeof(struct pt_regs);
	lc->current_task = (unsigned long) tsk;
	lc->lpp = LPP_MAGIC;
	lc->current_pid = tsk->pid;
	lc->user_timer = tsk->thread.user_timer;
	lc->guest_timer = tsk->thread.guest_timer;
	lc->system_timer = tsk->thread.system_timer;
	lc->hardirq_timer = tsk->thread.hardirq_timer;
	lc->softirq_timer = tsk->thread.softirq_timer;
	lc->steal_timer = 0;
}

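/*
 * Start a function on a (stopped) cpu via the sigp restart order. The
 * function is executed on the cpu's nodat stack.
 */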
static void pcpu_start_fn(struct pcpu *pcpu, void (*func)(void *), void *data)
{
	struct lowcore *lc = pcpu->lowcore;

	lc->restart_stack = lc->nodat_stack;
	lc->restart_fn = (unsigned long) func;
	lc->restart_data = (unsigned long) data;
	lc->restart_source = -1UL;
	pcpu_sigp_retry(pcpu, SIGP_RESTART, 0);
}

/*
 * Call function via PSW restart on pcpu and stop the current cpu.
 */
static void __pcpu_delegate(void (*func)(void *), void *data)
{
	func(data);	/* should not return */
}

static void __no_sanitize_address pcpu_delegate(struct pcpu *pcpu,
						void (*func)(void *),
						void *data, unsigned long stack)
{
	struct lowcore *lc = lowcore_ptr[pcpu - pcpu_devices];
	unsigned long source_cpu = stap();

	__load_psw_mask(PSW_KERNEL_BITS | PSW_MASK_DAT);
	if (pcpu->address == source_cpu)
		CALL_ON_STACK(__pcpu_delegate, stack, 2, func, data);
	/* Stop target cpu (if func returns this stops the current cpu). */
	pcpu_sigp_retry(pcpu, SIGP_STOP, 0);
	/* Restart func on the target cpu and stop the current cpu. */
	mem_assign_absolute(lc->restart_stack, stack);
	mem_assign_absolute(lc->restart_fn, (unsigned long) func);
	mem_assign_absolute(lc->restart_data, (unsigned long) data);
	mem_assign_absolute(lc->restart_source, source_cpu);
	__bpon();
	asm volatile(
		"0:	sigp	0,%0,%2	# sigp restart to target cpu\n"
		"	brc	2,0b	# busy, try again\n"
		"1:	sigp	0,%1,%3	# sigp stop to current cpu\n"
		"	brc	2,1b	# busy, try again\n"
		: : "d" (pcpu->address), "d" (source_cpu),
		    "K" (SIGP_RESTART), "K" (SIGP_STOP)
		: "0", "1", "cc");
	for (;;) ;
}

/*
 * Enable additional logical cpus for multi-threading.
 */
static int pcpu_set_smt(unsigned int mtid)
{
	int cc;

	if (smp_cpu_mtid == mtid)
		return 0;
	cc = __pcpu_sigp(0, SIGP_SET_MULTI_THREADING, mtid, NULL);
	if (cc == 0) {
		smp_cpu_mtid = mtid;
		smp_cpu_mt_shift = 0;
		while (smp_cpu_mtid >= (1U << smp_cpu_mt_shift))
			smp_cpu_mt_shift++;
		pcpu_devices[0].address = stap();
	}
	return cc;
}

/*
 * Call function on an online CPU.
 */
void smp_call_online_cpu(void (*func)(void *), void *data)
{
	struct pcpu *pcpu;

	/* Use the current cpu if it is online. */
	pcpu = pcpu_find_address(cpu_online_mask, stap());
	if (!pcpu)
		/* Use the first online cpu. */
		pcpu = pcpu_devices + cpumask_first(cpu_online_mask);
	pcpu_delegate(pcpu, func, data, (unsigned long) restart_stack);
}

/*
 * Call function on the ipl CPU.
 */
void smp_call_ipl_cpu(void (*func)(void *), void *data)
{
	struct lowcore *lc = pcpu_devices->lowcore;

	if (pcpu_devices[0].address == stap())
		lc = &S390_lowcore;

	pcpu_delegate(&pcpu_devices[0], func, data,
		      lc->nodat_stack);
}

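/*
 * Translate a physical cpu address to a logical cpu number. Returns -1
 * if no present cpu matches the address.
 */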
int smp_find_processor_id(u16 address)
{
	int cpu;

	for_each_present_cpu(cpu)
		if (pcpu_devices[cpu].address == address)
			return cpu;
	return -1;
}

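/*
 * Request deferred machine check handling on the current cpu: the
 * ec_mcck_pending bit is set and the cpu signals itself, so that
 * s390_handle_mcck() runs from the external call handler.
 */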
void schedule_mcck_handler(void)
{
	pcpu_ec_call(pcpu_devices + smp_processor_id(), ec_mcck_pending);
}

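/*
 * A (virtual) cpu counts as preempted if it is neither in enabled wait
 * nor currently running.
 */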
bool notrace arch_vcpu_is_preempted(int cpu)
{
	if (test_cpu_flag_of(CIF_ENABLED_WAIT, cpu))
		return false;
	if (pcpu_running(pcpu_devices + cpu))
		return false;
	return true;
}
EXPORT_SYMBOL(arch_vcpu_is_preempted);

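/*
 * Yield the current time slice in favor of the given cpu via diagnose
 * 0x9c. This is a no-op on machines without diagnose 0x9c support.
 */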
void notrace smp_yield_cpu(int cpu)
{
	if (!MACHINE_HAS_DIAG9C)
		return;
	diag_stat_inc_norecursion(DIAG_STAT_X09C);
	asm volatile("diag %0,0,0x9c"
		     : : "d" (pcpu_devices[cpu].address));
}

/*
 * Send cpus emergency shutdown signal. This gives the cpus the
 * opportunity to complete outstanding interrupts.
 */
void notrace smp_emergency_stop(void)
{
	cpumask_t cpumask;
	u64 end;
	int cpu;

	cpumask_copy(&cpumask, cpu_online_mask);
	cpumask_clear_cpu(smp_processor_id(), &cpumask);

	end = get_tod_clock() + (1000000UL << 12);
	for_each_cpu(cpu, &cpumask) {
		struct pcpu *pcpu = pcpu_devices + cpu;
		set_bit(ec_stop_cpu, &pcpu->ec_mask);
		while (__pcpu_sigp(pcpu->address, SIGP_EMERGENCY_SIGNAL,
				   0, NULL) == SIGP_CC_BUSY &&
		       get_tod_clock() < end)
			cpu_relax();
	}
	while (get_tod_clock() < end) {
		for_each_cpu(cpu, &cpumask)
			if (pcpu_stopped(pcpu_devices + cpu))
				cpumask_clear_cpu(cpu, &cpumask);
		if (cpumask_empty(&cpumask))
			break;
		cpu_relax();
	}
}
NOKPROBE_SYMBOL(smp_emergency_stop);

/*
 * Stop all cpus but the current one.
 */
void smp_send_stop(void)
{
	int cpu;

	/* Disable all interrupts/machine checks */
	__load_psw_mask(PSW_KERNEL_BITS | PSW_MASK_DAT);
	trace_hardirqs_off();

	debug_set_critical();

	if (oops_in_progress)
		smp_emergency_stop();

	/* stop all processors */
	for_each_online_cpu(cpu) {
		if (cpu == smp_processor_id())
			continue;
		pcpu_sigp_retry(pcpu_devices + cpu, SIGP_STOP, 0);
		while (!pcpu_stopped(pcpu_devices + cpu))
			cpu_relax();
	}
}

/*
 * This is the main routine where commands issued by other
 * cpus are handled.
 */
static void smp_handle_ext_call(void)
{
	unsigned long bits;

	/* handle bit signal external calls */
	bits = xchg(&pcpu_devices[smp_processor_id()].ec_mask, 0);
	if (test_bit(ec_stop_cpu, &bits))
		smp_stop_cpu();
	if (test_bit(ec_schedule, &bits))
		scheduler_ipi();
	if (test_bit(ec_call_function_single, &bits))
		generic_smp_call_function_single_interrupt();
	if (test_bit(ec_mcck_pending, &bits))
		s390_handle_mcck();
}

static void do_ext_call_interrupt(struct ext_code ext_code,
				  unsigned int param32, unsigned long param64)
{
	inc_irq_stat(ext_code.code == 0x1202 ? IRQEXT_EXC : IRQEXT_EMS);
	smp_handle_ext_call();
}

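/*
 * Deliver call-function requests to other cpus. Both variants map to
 * the ec_call_function_single event-call bit.
 */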
void arch_send_call_function_ipi_mask(const struct cpumask *mask)
{
	int cpu;

	for_each_cpu(cpu, mask)
		pcpu_ec_call(pcpu_devices + cpu, ec_call_function_single);
}

void arch_send_call_function_single_ipi(int cpu)
{
	pcpu_ec_call(pcpu_devices + cpu, ec_call_function_single);
}

/*
 * This function sends a 'reschedule' IPI to another CPU. It goes
 * straight through and wastes no time serializing anything. The worst
 * case is that we lose a reschedule.
 */
void smp_send_reschedule(int cpu)
{
	pcpu_ec_call(pcpu_devices + cpu, ec_schedule);
}

/*
 * parameter area for the set/clear control bit callbacks
 */
struct ec_creg_mask_parms {
	unsigned long orval;
	unsigned long andval;
	int cr;
};

/*
 * callback for setting/clearing control bits
 */
static void smp_ctl_bit_callback(void *info)
{
	struct ec_creg_mask_parms *pp = info;
	unsigned long cregs[16];

	__ctl_store(cregs, 0, 15);
	cregs[pp->cr] = (cregs[pp->cr] & pp->andval) | pp->orval;
	__ctl_load(cregs, 0, 15);
}

/*
 * Set a bit in a control register of all cpus
 */
void smp_ctl_set_bit(int cr, int bit)
{
	struct ec_creg_mask_parms parms = { 1UL << bit, -1UL, cr };

	on_each_cpu(smp_ctl_bit_callback, &parms, 1);
}
EXPORT_SYMBOL(smp_ctl_set_bit);

/*
 * Clear a bit in a control register of all cpus
 */
void smp_ctl_clear_bit(int cr, int bit)
{
	struct ec_creg_mask_parms parms = { 0, ~(1UL << bit), cr };

	on_each_cpu(smp_ctl_bit_callback, &parms, 1);
}
EXPORT_SYMBOL(smp_ctl_clear_bit);

#ifdef CONFIG_CRASH_DUMP

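/*
 * Store the register state of a remote cpu into its lowcore save
 * areas for the crash dump code. The additional status (vector
 * registers, guarded storage) is stored as well where available.
 */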
int smp_store_status(int cpu)
{
	struct pcpu *pcpu = pcpu_devices + cpu;
	unsigned long pa;

	pa = __pa(&pcpu->lowcore->floating_pt_save_area);
	if (__pcpu_sigp_relax(pcpu->address, SIGP_STORE_STATUS_AT_ADDRESS,
			      pa) != SIGP_CC_ORDER_CODE_ACCEPTED)
		return -EIO;
	if (!MACHINE_HAS_VX && !MACHINE_HAS_GS)
		return 0;
	pa = __pa(pcpu->lowcore->mcesad & MCESA_ORIGIN_MASK);
	if (MACHINE_HAS_GS)
		pa |= pcpu->lowcore->mcesad & MCESA_LC_MASK;
	if (__pcpu_sigp_relax(pcpu->address, SIGP_STORE_ADDITIONAL_STATUS,
			      pa) != SIGP_CC_ORDER_CODE_ACCEPTED)
		return -EIO;
	return 0;
}

/*
 * Collect CPU state of the previous, crashed system.
 * There are four cases:
 * 1) standard zfcp/nvme dump
 *    condition: OLDMEM_BASE == NULL && is_ipl_type_dump() == true
 *    The state for all CPUs except the boot CPU needs to be collected
 *    with sigp stop-and-store-status. The boot CPU state is located in
 *    the absolute lowcore of the memory stored in the HSA. The zcore code
 *    will copy the boot CPU state from the HSA.
 * 2) stand-alone kdump for SCSI/NVMe (zfcp/nvme dump with swapped memory)
 *    condition: OLDMEM_BASE != NULL && is_ipl_type_dump() == true
 *    The state for all CPUs except the boot CPU needs to be collected
 *    with sigp stop-and-store-status. The firmware or the boot-loader
 *    stored the registers of the boot CPU in the absolute lowcore in the
 *    memory of the old system.
 * 3) kdump and the old kernel did not store the CPU state,
 *    or stand-alone kdump for DASD
 *    condition: OLDMEM_BASE != NULL && !is_kdump_kernel()
 *    The state for all CPUs except the boot CPU needs to be collected
 *    with sigp stop-and-store-status. The kexec code or the boot-loader
 *    stored the registers of the boot CPU in the memory of the old system.
 * 4) kdump and the old kernel stored the CPU state
 *    condition: OLDMEM_BASE != NULL && is_kdump_kernel()
 *    This case does not exist for s390 anymore; setup_arch explicitly
 *    deactivates the elfcorehdr= kernel parameter.
 */
static __init void smp_save_cpu_vxrs(struct save_area *sa, u16 addr,
				     bool is_boot_cpu, unsigned long page)
{
	__vector128 *vxrs = (__vector128 *) page;

	if (is_boot_cpu)
		vxrs = boot_cpu_vector_save_area;
	else
		__pcpu_sigp_relax(addr, SIGP_STORE_ADDITIONAL_STATUS, page);
	save_area_add_vxrs(sa, vxrs);
}

static __init void smp_save_cpu_regs(struct save_area *sa, u16 addr,
				     bool is_boot_cpu, unsigned long page)
{
	void *regs = (void *) page;

	if (is_boot_cpu)
		copy_oldmem_kernel(regs, (void *) __LC_FPREGS_SAVE_AREA, 512);
	else
		__pcpu_sigp_relax(addr, SIGP_STORE_STATUS_AT_ADDRESS, page);
	save_area_add_regs(sa, regs);
}

void __init smp_save_dump_cpus(void)
{
	int addr, boot_cpu_addr, max_cpu_addr;
	struct save_area *sa;
	unsigned long page;
	bool is_boot_cpu;

	if (!(OLDMEM_BASE || is_ipl_type_dump()))
		/* No previous system present, normal boot. */
		return;
	/* Allocate a page as dumping area for the store status sigps */
	page = memblock_phys_alloc_range(PAGE_SIZE, PAGE_SIZE, 0, 1UL << 31);
	if (!page)
		panic("ERROR: Failed to allocate %lx bytes below %lx\n",
		      PAGE_SIZE, 1UL << 31);

	/* Set multi-threading state to the previous system. */
	pcpu_set_smt(sclp.mtid_prev);
	boot_cpu_addr = stap();
	max_cpu_addr = SCLP_MAX_CORES << sclp.mtid_prev;
	for (addr = 0; addr <= max_cpu_addr; addr++) {
		if (__pcpu_sigp_relax(addr, SIGP_SENSE, 0) ==
		    SIGP_CC_NOT_OPERATIONAL)
			continue;
		is_boot_cpu = (addr == boot_cpu_addr);
		/* Allocate save area */
		sa = save_area_alloc(is_boot_cpu);
		if (!sa)
			panic("could not allocate memory for save area\n");
		if (MACHINE_HAS_VX)
			/* Get the vector registers */
			smp_save_cpu_vxrs(sa, addr, is_boot_cpu, page);
		/*
		 * For a zfcp/nvme dump OLDMEM_BASE == NULL and the registers
		 * of the boot CPU are stored in the HSA. To retrieve
		 * these registers an SCLP request is required which is
		 * done by drivers/s390/char/zcore.c:init_cpu_info()
		 */
		if (!is_boot_cpu || OLDMEM_BASE)
			/* Get the CPU registers */
			smp_save_cpu_regs(sa, addr, is_boot_cpu, page);
	}
	memblock_free(page, PAGE_SIZE);
	diag_dma_ops.diag308_reset();
	pcpu_set_smt(0);
}
#endif /* CONFIG_CRASH_DUMP */

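/*
 * Accessors for the per-cpu state kept in pcpu_devices. Callers that
 * change the state or polarization must hold smp_cpu_state_mutex.
 */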
void smp_cpu_set_polarization(int cpu, int val)
{
	pcpu_devices[cpu].polarization = val;
}

int smp_cpu_get_polarization(int cpu)
{
	return pcpu_devices[cpu].polarization;
}

int smp_cpu_get_cpu_address(int cpu)
{
	return pcpu_devices[cpu].address;
}

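/*
 * Retrieve the list of cpu cores, preferably from the SCLP. If an SCLP
 * request ever fails, fall back permanently to probing all possible
 * cpu addresses with the sigp sense order.
 */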
static void __ref smp_get_core_info(struct sclp_core_info *info, int early)
{
	static int use_sigp_detection;
	int address;

	if (use_sigp_detection || sclp_get_core_info(info, early)) {
		use_sigp_detection = 1;
		for (address = 0;
		     address < (SCLP_MAX_CORES << smp_cpu_mt_shift);
		     address += (1U << smp_cpu_mt_shift)) {
			if (__pcpu_sigp_relax(address, SIGP_SENSE, 0) ==
			    SIGP_CC_NOT_OPERATIONAL)
				continue;
			info->core[info->configured].core_id =
				address >> smp_cpu_mt_shift;
			info->configured++;
		}
		info->combined = info->configured;
	}
}

static int smp_add_present_cpu(int cpu);

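/*
 * Register all threads of a core as present cpus. Threads that are
 * already present are skipped. Returns the number of logical cpus that
 * were added.
 */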
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 743) static int smp_add_core(struct sclp_core_entry *core, cpumask_t *avail,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 744) bool configured, bool early)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 745) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 746) struct pcpu *pcpu;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 747) int cpu, nr, i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 748) u16 address;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 749)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 750) nr = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 751) if (sclp.has_core_type && core->type != boot_core_type)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 752) return nr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 753) cpu = cpumask_first(avail);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 754) address = core->core_id << smp_cpu_mt_shift;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 755) for (i = 0; (i <= smp_cpu_mtid) && (cpu < nr_cpu_ids); i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 756) if (pcpu_find_address(cpu_present_mask, address + i))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 757) continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 758) pcpu = pcpu_devices + cpu;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 759) pcpu->address = address + i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 760) if (configured)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 761) pcpu->state = CPU_STATE_CONFIGURED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 762) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 763) pcpu->state = CPU_STATE_STANDBY;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 764) smp_cpu_set_polarization(cpu, POLARIZATION_UNKNOWN);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 765) set_cpu_present(cpu, true);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 766) if (!early && smp_add_present_cpu(cpu) != 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 767) set_cpu_present(cpu, false);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 768) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 769) nr++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 770) cpumask_clear_cpu(cpu, avail);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 771) cpu = cpumask_next(cpu, avail);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 772) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 773) return nr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 774) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 775)
static int __smp_rescan_cpus(struct sclp_core_info *info, bool early)
{
	struct sclp_core_entry *core;
	static cpumask_t avail;
	bool configured;
	u16 core_id;
	int nr, i;

	nr = 0;
	cpumask_xor(&avail, cpu_possible_mask, cpu_present_mask);
	/*
	 * Add IPL core first (which got logical CPU number 0) to make sure
	 * that all SMT threads get subsequent logical CPU numbers.
	 */
	if (early) {
		core_id = pcpu_devices[0].address >> smp_cpu_mt_shift;
		for (i = 0; i < info->configured; i++) {
			core = &info->core[i];
			if (core->core_id == core_id) {
				nr += smp_add_core(core, &avail, true, early);
				break;
			}
		}
	}
	for (i = 0; i < info->combined; i++) {
		configured = i < info->configured;
		nr += smp_add_core(&info->core[i], &avail, configured, early);
	}
	return nr;
}

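/*
 * Detect the CPUs available at boot: read the core list via SCLP,
 * determine the boot CPU type and the multi-threading level, print a
 * summary and populate the present mask.
 */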
void __init smp_detect_cpus(void)
{
	unsigned int cpu, mtid, c_cpus, s_cpus;
	struct sclp_core_info *info;
	u16 address;

	/* Get CPU information */
	info = memblock_alloc(sizeof(*info), 8);
	if (!info)
		panic("%s: Failed to allocate %zu bytes align=0x%x\n",
		      __func__, sizeof(*info), 8);
	smp_get_core_info(info, 1);
	/* Find boot CPU type */
	if (sclp.has_core_type) {
		address = stap();
		for (cpu = 0; cpu < info->combined; cpu++)
			if (info->core[cpu].core_id == address) {
				/* The boot cpu dictates the cpu type. */
				boot_core_type = info->core[cpu].type;
				break;
			}
		if (cpu >= info->combined)
			panic("Could not find boot CPU type");
	}

	/* Set multi-threading state for the current system */
	mtid = boot_core_type ? sclp.mtid : sclp.mtid_cp;
	mtid = (mtid < smp_max_threads) ? mtid : smp_max_threads - 1;
	pcpu_set_smt(mtid);

	/* Print number of CPUs */
	c_cpus = s_cpus = 0;
	for (cpu = 0; cpu < info->combined; cpu++) {
		if (sclp.has_core_type &&
		    info->core[cpu].type != boot_core_type)
			continue;
		if (cpu < info->configured)
			c_cpus += smp_cpu_mtid + 1;
		else
			s_cpus += smp_cpu_mtid + 1;
	}
	pr_info("%d configured CPUs, %d standby CPUs\n", c_cpus, s_cpus);

	/* Add CPUs present at boot */
	get_online_cpus();
	__smp_rescan_cpus(info, true);
	put_online_cpus();
	memblock_free_early((unsigned long)info, sizeof(*info));
}

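/*
 * Second stage of bringing up a secondary CPU, executed on that CPU:
 * initialize timers and pseudo page faults, mark the CPU online and
 * enter the idle loop.
 */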
static void smp_init_secondary(void)
{
	int cpu = raw_smp_processor_id();

	S390_lowcore.last_update_clock = get_tod_clock();
	restore_access_regs(S390_lowcore.access_regs_save_area);
	set_cpu_flag(CIF_ASCE_PRIMARY);
	set_cpu_flag(CIF_ASCE_SECONDARY);
	cpu_init();
	rcu_cpu_starting(cpu);
	init_cpu_timer();
	vtime_init();
	pfault_init();
	notify_cpu_starting(cpu);
	if (topology_cpu_dedicated(cpu))
		set_cpu_flag(CIF_DEDICATED_CPU);
	else
		clear_cpu_flag(CIF_DEDICATED_CPU);
	set_cpu_online(cpu, true);
	update_cpu_masks();
	inc_irq_stat(CPU_RST);
	local_irq_enable();
	cpu_startup_entry(CPUHP_AP_ONLINE_IDLE);
}

/*
 * Activate a secondary processor.
 */
static void __no_sanitize_address smp_start_secondary(void *cpuvoid)
{
	S390_lowcore.restart_stack = (unsigned long) restart_stack;
	S390_lowcore.restart_fn = (unsigned long) do_restart;
	S390_lowcore.restart_data = 0;
	S390_lowcore.restart_source = -1UL;
	__ctl_load(S390_lowcore.cregs_save_area, 0, 15);
	__load_psw_mask(PSW_KERNEL_BITS | PSW_MASK_DAT);
	CALL_ON_STACK_NORETURN(smp_init_secondary, S390_lowcore.kernel_stack);
}

/* Upping and downing of CPUs */
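/*
 * Bring a logical CPU up: reset it via SIGP, allocate and prepare its
 * lowcore, attach the idle task and start smp_start_secondary() on it,
 * then wait until the new CPU has marked itself online.
 */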
int __cpu_up(unsigned int cpu, struct task_struct *tidle)
{
	struct pcpu *pcpu = pcpu_devices + cpu;
	int rc;

	if (pcpu->state != CPU_STATE_CONFIGURED)
		return -EIO;
	if (pcpu_sigp_retry(pcpu, SIGP_INITIAL_CPU_RESET, 0) !=
	    SIGP_CC_ORDER_CODE_ACCEPTED)
		return -EIO;

	rc = pcpu_alloc_lowcore(pcpu, cpu);
	if (rc)
		return rc;
	pcpu_prepare_secondary(pcpu, cpu);
	pcpu_attach_task(pcpu, tidle);
	pcpu_start_fn(pcpu, smp_start_secondary, NULL);
	/* Wait until cpu puts itself in the online & active maps */
	while (!cpu_online(cpu))
		cpu_relax();
	return 0;
}

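/*
 * The "possible_cpus=" kernel parameter caps the number of possible
 * CPUs; e.g. booting with possible_cpus=4 limits cpu_possible_mask to
 * at most four CPUs (further capped by the SCLP maximum, see
 * smp_fill_possible_mask() below).
 */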
static unsigned int setup_possible_cpus __initdata;

static int __init _setup_possible_cpus(char *s)
{
	get_option(&s, &setup_possible_cpus);
	return 0;
}
early_param("possible_cpus", _setup_possible_cpus);

int __cpu_disable(void)
{
	unsigned long cregs[16];

	/* Handle possible pending IPIs */
	smp_handle_ext_call();
	set_cpu_online(smp_processor_id(), false);
	update_cpu_masks();
	/* Disable pseudo page faults on this cpu. */
	pfault_fini();
	/* Disable interrupt sources via control register. */
	__ctl_store(cregs, 0, 15);
	cregs[0] &= ~0x0000ee70UL;	/* disable all external interrupts */
	cregs[6] &= ~0xff000000UL;	/* disable all I/O interrupts */
	cregs[14] &= ~0x1f000000UL;	/* disable most machine checks */
	__ctl_load(cregs, 0, 15);
	clear_cpu_flag(CIF_NOHZ_DELAY);
	return 0;
}

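/*
 * Executed on a surviving CPU: wait until the outgoing CPU has
 * actually stopped, then free its lowcore and detach it from init_mm.
 */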
void __cpu_die(unsigned int cpu)
{
	struct pcpu *pcpu;

	/* Wait until target cpu is down */
	pcpu = pcpu_devices + cpu;
	while (!pcpu_stopped(pcpu))
		cpu_relax();
	pcpu_free_lowcore(pcpu);
	cpumask_clear_cpu(cpu, mm_cpumask(&init_mm));
	cpumask_clear_cpu(cpu, &init_mm.context.cpu_attach_mask);
}

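/*
 * Executed on the dying CPU itself as the final step of hot unplug:
 * stop the CPU via SIGP and never return.
 */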
void __noreturn cpu_die(void)
{
	idle_task_exit();
	__bpon();
	pcpu_sigp_retry(pcpu_devices + smp_processor_id(), SIGP_STOP, 0);
	for (;;) ;
}

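/*
 * Compute cpu_possible_mask from the SCLP limits and the optional
 * possible_cpus= override handled above.
 */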
void __init smp_fill_possible_mask(void)
{
	unsigned int possible, sclp_max, cpu;

	sclp_max = max(sclp.mtid, sclp.mtid_cp) + 1;
	sclp_max = min(smp_max_threads, sclp_max);
	sclp_max = (sclp.max_cores * sclp_max) ?: nr_cpu_ids;
	possible = setup_possible_cpus ?: nr_cpu_ids;
	possible = min(possible, sclp_max);
	for (cpu = 0; cpu < possible && cpu < nr_cpu_ids; cpu++)
		set_cpu_possible(cpu, true);
}

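/*
 * Secondary CPUs are brought up on demand via __cpu_up(), so all that
 * is left to do here is to register the two IPI external interrupts.
 */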
void __init smp_prepare_cpus(unsigned int max_cpus)
{
	/* request the 0x1201 emergency signal external interrupt */
	if (register_external_irq(EXT_IRQ_EMERGENCY_SIG, do_ext_call_interrupt))
		panic("Couldn't request external interrupt 0x1201");
	/* request the 0x1202 external call external interrupt */
	if (register_external_irq(EXT_IRQ_EXTERNAL_CALL, do_ext_call_interrupt))
		panic("Couldn't request external interrupt 0x1202");
}

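/* The boot CPU is already running; finish its logical CPU 0 setup. */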
void __init smp_prepare_boot_cpu(void)
{
	struct pcpu *pcpu = pcpu_devices;

	WARN_ON(!cpu_present(0) || !cpu_online(0));
	pcpu->state = CPU_STATE_CONFIGURED;
	pcpu->lowcore = (struct lowcore *)(unsigned long) store_prefix();
	S390_lowcore.percpu_offset = __per_cpu_offset[0];
	smp_cpu_set_polarization(0, POLARIZATION_UNKNOWN);
}

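/*
 * Called very early during boot: logical CPU 0 is the IPL CPU, whose
 * physical address is obtained with the store CPU address (stap)
 * instruction.
 */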
void __init smp_setup_processor_id(void)
{
	pcpu_devices[0].address = stap();
	S390_lowcore.cpu_nr = 0;
	S390_lowcore.spinlock_lockval = arch_spin_lockval(0);
	S390_lowcore.spinlock_index = 0;
}

/*
 * The frequency of the profiling timer can be changed by writing a
 * multiplier value into /proc/profile.
 *
 * Usually you want to run this on all CPUs ;)
 *
 * On s390 the multiplier is accepted but has no effect.
 */
int setup_profiling_timer(unsigned int multiplier)
{
	return 0;
}

static ssize_t cpu_configure_show(struct device *dev,
				  struct device_attribute *attr, char *buf)
{
	ssize_t count;

	mutex_lock(&smp_cpu_state_mutex);
	count = sprintf(buf, "%d\n", pcpu_devices[dev->id].state);
	mutex_unlock(&smp_cpu_state_mutex);
	return count;
}

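/*
 * Writing 0 or 1 to /sys/devices/system/cpu/cpuN/configure moves an
 * offline CPU (and its SMT siblings) between the configured and
 * standby state, e.g.:
 *
 *	echo 0 > /sys/devices/system/cpu/cpu2/configure
 *
 * CPU 0 and online CPUs cannot be deconfigured.
 */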
static ssize_t cpu_configure_store(struct device *dev,
				   struct device_attribute *attr,
				   const char *buf, size_t count)
{
	struct pcpu *pcpu;
	int cpu, val, rc, i;
	char delim;

	if (sscanf(buf, "%d %c", &val, &delim) != 1)
		return -EINVAL;
	if (val != 0 && val != 1)
		return -EINVAL;
	get_online_cpus();
	mutex_lock(&smp_cpu_state_mutex);
	rc = -EBUSY;
	/* disallow configuration changes of online cpus and cpu 0 */
	cpu = dev->id;
	cpu = smp_get_base_cpu(cpu);
	if (cpu == 0)
		goto out;
	for (i = 0; i <= smp_cpu_mtid; i++)
		if (cpu_online(cpu + i))
			goto out;
	pcpu = pcpu_devices + cpu;
	rc = 0;
	switch (val) {
	case 0:
		if (pcpu->state != CPU_STATE_CONFIGURED)
			break;
		rc = sclp_core_deconfigure(pcpu->address >> smp_cpu_mt_shift);
		if (rc)
			break;
		for (i = 0; i <= smp_cpu_mtid; i++) {
			if (cpu + i >= nr_cpu_ids || !cpu_present(cpu + i))
				continue;
			pcpu[i].state = CPU_STATE_STANDBY;
			smp_cpu_set_polarization(cpu + i,
						 POLARIZATION_UNKNOWN);
		}
		topology_expect_change();
		break;
	case 1:
		if (pcpu->state != CPU_STATE_STANDBY)
			break;
		rc = sclp_core_configure(pcpu->address >> smp_cpu_mt_shift);
		if (rc)
			break;
		for (i = 0; i <= smp_cpu_mtid; i++) {
			if (cpu + i >= nr_cpu_ids || !cpu_present(cpu + i))
				continue;
			pcpu[i].state = CPU_STATE_CONFIGURED;
			smp_cpu_set_polarization(cpu + i,
						 POLARIZATION_UNKNOWN);
		}
		topology_expect_change();
		break;
	default:
		break;
	}
out:
	mutex_unlock(&smp_cpu_state_mutex);
	put_online_cpus();
	return rc ? rc : count;
}
static DEVICE_ATTR(configure, 0644, cpu_configure_show, cpu_configure_store);

static ssize_t show_cpu_address(struct device *dev,
				struct device_attribute *attr, char *buf)
{
	return sprintf(buf, "%d\n", pcpu_devices[dev->id].address);
}
static DEVICE_ATTR(address, 0444, show_cpu_address, NULL);

static struct attribute *cpu_common_attrs[] = {
	&dev_attr_configure.attr,
	&dev_attr_address.attr,
	NULL,
};

static struct attribute_group cpu_common_attr_group = {
	.attrs = cpu_common_attrs,
};

static struct attribute *cpu_online_attrs[] = {
	&dev_attr_idle_count.attr,
	&dev_attr_idle_time_us.attr,
	NULL,
};

static struct attribute_group cpu_online_attr_group = {
	.attrs = cpu_online_attrs,
};

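/*
 * CPU hotplug callbacks: create/remove the per-CPU idle statistics
 * attributes as a CPU goes online/offline.
 */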
static int smp_cpu_online(unsigned int cpu)
{
	struct device *s = &per_cpu(cpu_device, cpu)->dev;

	return sysfs_create_group(&s->kobj, &cpu_online_attr_group);
}

static int smp_cpu_pre_down(unsigned int cpu)
{
	struct device *s = &per_cpu(cpu_device, cpu)->dev;

	sysfs_remove_group(&s->kobj, &cpu_online_attr_group);
	return 0;
}

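/*
 * Register a newly present CPU with the driver core and create its
 * sysfs attribute groups; unwind everything on error.
 */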
static int smp_add_present_cpu(int cpu)
{
	struct device *s;
	struct cpu *c;
	int rc;

	c = kzalloc(sizeof(*c), GFP_KERNEL);
	if (!c)
		return -ENOMEM;
	per_cpu(cpu_device, cpu) = c;
	s = &c->dev;
	c->hotpluggable = 1;
	rc = register_cpu(c, cpu);
	if (rc)
		goto out;
	rc = sysfs_create_group(&s->kobj, &cpu_common_attr_group);
	if (rc)
		goto out_cpu;
	rc = topology_cpu_init(c);
	if (rc)
		goto out_topology;
	return 0;

out_topology:
	sysfs_remove_group(&s->kobj, &cpu_common_attr_group);
out_cpu:
	unregister_cpu(c);
out:
	return rc;
}

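/*
 * Re-read the core list from SCLP and add any cores that appeared
 * since the last scan; schedule a topology update if CPUs were added.
 */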
int __ref smp_rescan_cpus(void)
{
	struct sclp_core_info *info;
	int nr;

	info = kzalloc(sizeof(*info), GFP_KERNEL);
	if (!info)
		return -ENOMEM;
	smp_get_core_info(info, 0);
	get_online_cpus();
	mutex_lock(&smp_cpu_state_mutex);
	nr = __smp_rescan_cpus(info, false);
	mutex_unlock(&smp_cpu_state_mutex);
	put_online_cpus();
	kfree(info);
	if (nr)
		topology_schedule_update();
	return 0;
}

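/*
 * Writing anything to /sys/devices/system/cpu/rescan triggers a CPU
 * rescan, e.g.:
 *
 *	echo 1 > /sys/devices/system/cpu/rescan
 */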
static ssize_t __ref rescan_store(struct device *dev,
				  struct device_attribute *attr,
				  const char *buf,
				  size_t count)
{
	int rc;

	rc = lock_device_hotplug_sysfs();
	if (rc)
		return rc;
	rc = smp_rescan_cpus();
	unlock_device_hotplug();
	return rc ? rc : count;
}
static DEVICE_ATTR_WO(rescan);

static int __init s390_smp_init(void)
{
	int cpu, rc = 0;

	rc = device_create_file(cpu_subsys.dev_root, &dev_attr_rescan);
	if (rc)
		return rc;
	for_each_present_cpu(cpu) {
		rc = smp_add_present_cpu(cpu);
		if (rc)
			goto out;
	}

	rc = cpuhp_setup_state(CPUHP_AP_ONLINE_DYN, "s390/smp:online",
			       smp_cpu_online, smp_cpu_pre_down);
	rc = rc <= 0 ? rc : 0;
out:
	return rc;
}
subsys_initcall(s390_smp_init);