Orange Pi 5 kernel

Deprecated Linux kernel 5.10.110 for Orange Pi 5/5B/5+ boards

3 Commits   0 Branches   0 Tags
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   1) // SPDX-License-Identifier: GPL-2.0-or-later
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   2) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   3)  * MIPS idle loop and WAIT instruction support.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   4)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   5)  * Copyright (C) xxxx  the Anonymous
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   6)  * Copyright (C) 1994 - 2006 Ralf Baechle
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   7)  * Copyright (C) 2003, 2004  Maciej W. Rozycki
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   8)  * Copyright (C) 2001, 2004, 2011, 2012	 MIPS Technologies, Inc.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   9)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  10) #include <linux/cpu.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  11) #include <linux/export.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  12) #include <linux/init.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  13) #include <linux/irqflags.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  14) #include <linux/printk.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  15) #include <linux/sched.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  16) #include <asm/cpu.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  17) #include <asm/cpu-info.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  18) #include <asm/cpu-type.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  19) #include <asm/idle.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  20) #include <asm/mipsregs.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  21) 
/*
 * Not all of the MIPS CPUs have the "wait" instruction available.  Moreover,
 * the implementation of the "wait" feature differs between CPU families.
 * This pointer selects, at boot time (see check_wait() below), the function
 * that implements the CPU-specific wait.  The wait instruction stops the
 * pipeline and greatly reduces the power consumption of the CPU.
 *
 * NULL means "no usable wait"; arch_cpu_idle() then falls back to simply
 * re-enabling interrupts.
 */
void (*cpu_wait)(void);
EXPORT_SYMBOL(cpu_wait);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  31) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  32) static void __cpuidle r3081_wait(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  33) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  34) 	unsigned long cfg = read_c0_conf();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  35) 	write_c0_conf(cfg | R30XX_CONF_HALT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  36) 	raw_local_irq_enable();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  37) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  38) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  39) static void __cpuidle r39xx_wait(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  40) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  41) 	if (!need_resched())
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  42) 		write_c0_conf(read_c0_conf() | TX39_CONF_HALT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  43) 	raw_local_irq_enable();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  44) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  45) 
/*
 * Generic R4000-class idle.  Interrupts are deliberately enabled
 * *before* executing WAIT so that a pending interrupt can terminate
 * the wait; __r4k_wait() (assembly, see genex.S) issues the actual
 * WAIT instruction.  The small window between the two calls is
 * handled by the assembly-side rollback logic — do not reorder.
 */
void __cpuidle r4k_wait(void)
{
	raw_local_irq_enable();
	__r4k_wait();
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  51) 
/*
 * This variant is preferable as it allows testing need_resched and going to
 * sleep depending on the outcome atomically.  Unfortunately the "It is
 * implementation-dependent whether the pipeline restarts when a non-enabled
 * interrupt is requested" restriction in the MIPS32/MIPS64 architecture makes
 * using this version a gamble.
 *
 * WAIT is executed with interrupts still masked; on cores where a masked
 * (non-enabled) interrupt unblocks WAIT (e.g. MIPSr6, see check_wait()),
 * this closes the race between the need_resched() test and going to sleep.
 */
void __cpuidle r4k_wait_irqoff(void)
{
	if (!need_resched())
		__asm__(
		"	.set	push		\n"
		"	.set	arch=r4000	\n"
		"	wait			\n"
		"	.set	pop		\n");
	/* Interrupts are re-enabled only after waking from (or skipping) WAIT. */
	raw_local_irq_enable();
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  69) 
/*
 * The RM7000 variant has to handle erratum 38.  The workaround is to not
 * have any pending stores when the WAIT instruction is executed.
 *
 * The mfc0/sync/mtc0 sequence below drains the write buffer: the mtc0 of
 * CP0 Status ($12) stalls the pipeline until the W stage, guaranteeing no
 * store is in flight when WAIT executes.  Keep the instruction order exact.
 */
static void __cpuidle rm7k_wait_irqoff(void)
{
	if (!need_resched())
		__asm__(
		"	.set	push					\n"
		"	.set	arch=r4000				\n"
		"	.set	noat					\n"
		"	mfc0	$1, $12					\n"
		"	sync						\n"
		"	mtc0	$1, $12		# stalls until W stage	\n"
		"	wait						\n"
		"	mtc0	$1, $12		# stalls until W stage	\n"
		"	.set	pop					\n");
	raw_local_irq_enable();
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  89) 
/*
 * Au1 'wait' is only useful when the 32kHz counter is used as timer,
 * since coreclock (and the cp0 counter) stops upon executing it.  Only an
 * interrupt can wake it, so they must be enabled before entering idle modes.
 *
 * The two "cache 0x14" (Fill_I) ops prefetch this very code into the
 * I-cache so that no memory access is outstanding while the core clock is
 * stopped; interrupts are turned on via the mtc0 of c0status immediately
 * before WAIT, and the trailing nops pad the pipeline after wakeup.
 */
static void __cpuidle au1k_wait(void)
{
	unsigned long c0status = read_c0_status() | 1;	/* irqs on */

	__asm__(
	"	.set	push			\n"
	"	.set	arch=r4000		\n"
	"	cache	0x14, 0(%0)		\n"
	"	cache	0x14, 32(%0)		\n"
	"	sync				\n"
	"	mtc0	%1, $12			\n" /* wr c0status */
	"	wait				\n"
	"	nop				\n"
	"	nop				\n"
	"	nop				\n"
	"	nop				\n"
	"	.set	pop			\n"
	: : "r" (au1k_wait), "r" (c0status));
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 114) 
/* Set by the "nowait" kernel parameter; forces check_wait() to leave
 * cpu_wait == NULL so the idle loop never uses the WAIT instruction. */
static int __initdata nowait;

/*
 * Early-boot handler for the "nowait" command-line option.
 * @s: option argument (unused — "nowait" takes no value).
 * Returns 1 to tell the __setup machinery the option was consumed.
 */
static int __init wait_disable(char *s)
{
	nowait = 1;

	return 1;
}

__setup("nowait", wait_disable);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 125) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 126) void __init check_wait(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 127) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 128) 	struct cpuinfo_mips *c = &current_cpu_data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 129) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 130) 	if (nowait) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 131) 		printk("Wait instruction disabled.\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 132) 		return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 133) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 134) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 135) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 136) 	 * MIPSr6 specifies that masked interrupts should unblock an executing
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 137) 	 * wait instruction, and thus that it is safe for us to use
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 138) 	 * r4k_wait_irqoff. Yippee!
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 139) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 140) 	if (cpu_has_mips_r6) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 141) 		cpu_wait = r4k_wait_irqoff;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 142) 		return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 143) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 144) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 145) 	switch (current_cpu_type()) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 146) 	case CPU_R3081:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 147) 	case CPU_R3081E:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 148) 		cpu_wait = r3081_wait;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 149) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 150) 	case CPU_TX3927:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 151) 		cpu_wait = r39xx_wait;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 152) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 153) 	case CPU_R4200:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 154) 	case CPU_R4600:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 155) 	case CPU_R4640:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 156) 	case CPU_R4650:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 157) 	case CPU_R4700:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 158) 	case CPU_R5000:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 159) 	case CPU_R5500:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 160) 	case CPU_NEVADA:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 161) 	case CPU_4KC:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 162) 	case CPU_4KEC:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 163) 	case CPU_4KSC:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 164) 	case CPU_5KC:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 165) 	case CPU_5KE:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 166) 	case CPU_25KF:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 167) 	case CPU_PR4450:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 168) 	case CPU_BMIPS3300:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 169) 	case CPU_BMIPS4350:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 170) 	case CPU_BMIPS4380:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 171) 	case CPU_CAVIUM_OCTEON:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 172) 	case CPU_CAVIUM_OCTEON_PLUS:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 173) 	case CPU_CAVIUM_OCTEON2:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 174) 	case CPU_CAVIUM_OCTEON3:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 175) 	case CPU_XBURST:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 176) 	case CPU_LOONGSON32:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 177) 	case CPU_XLR:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 178) 	case CPU_XLP:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 179) 		cpu_wait = r4k_wait;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 180) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 181) 	case CPU_LOONGSON64:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 182) 		if ((c->processor_id & (PRID_IMP_MASK | PRID_REV_MASK)) >=
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 183) 				(PRID_IMP_LOONGSON_64C | PRID_REV_LOONGSON3A_R2_0) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 184) 				(c->processor_id & PRID_IMP_MASK) == PRID_IMP_LOONGSON_64R)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 185) 			cpu_wait = r4k_wait;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 186) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 187) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 188) 	case CPU_BMIPS5000:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 189) 		cpu_wait = r4k_wait_irqoff;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 190) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 191) 	case CPU_RM7000:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 192) 		cpu_wait = rm7k_wait_irqoff;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 193) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 194) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 195) 	case CPU_PROAPTIV:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 196) 	case CPU_P5600:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 197) 		/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 198) 		 * Incoming Fast Debug Channel (FDC) data during a wait
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 199) 		 * instruction causes the wait never to resume, even if an
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 200) 		 * interrupt is received. Avoid using wait at all if FDC data is
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 201) 		 * likely to be received.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 202) 		 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 203) 		if (IS_ENABLED(CONFIG_MIPS_EJTAG_FDC_TTY))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 204) 			break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 205) 		fallthrough;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 206) 	case CPU_M14KC:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 207) 	case CPU_M14KEC:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 208) 	case CPU_24K:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 209) 	case CPU_34K:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 210) 	case CPU_1004K:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 211) 	case CPU_1074K:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 212) 	case CPU_INTERAPTIV:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 213) 	case CPU_M5150:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 214) 	case CPU_QEMU_GENERIC:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 215) 		cpu_wait = r4k_wait;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 216) 		if (read_c0_config7() & MIPS_CONF7_WII)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 217) 			cpu_wait = r4k_wait_irqoff;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 218) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 219) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 220) 	case CPU_74K:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 221) 		cpu_wait = r4k_wait;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 222) 		if ((c->processor_id & 0xff) >= PRID_REV_ENCODE_332(2, 1, 0))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 223) 			cpu_wait = r4k_wait_irqoff;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 224) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 225) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 226) 	case CPU_TX49XX:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 227) 		cpu_wait = r4k_wait_irqoff;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 228) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 229) 	case CPU_ALCHEMY:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 230) 		cpu_wait = au1k_wait;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 231) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 232) 	case CPU_20KC:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 233) 		/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 234) 		 * WAIT on Rev1.0 has E1, E2, E3 and E16.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 235) 		 * WAIT on Rev2.0 and Rev3.0 has E16.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 236) 		 * Rev3.1 WAIT is nop, why bother
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 237) 		 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 238) 		if ((c->processor_id & 0xff) <= 0x64)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 239) 			break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 240) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 241) 		/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 242) 		 * Another rev is incremeting c0_count at a reduced clock
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 243) 		 * rate while in WAIT mode.  So we basically have the choice
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 244) 		 * between using the cp0 timer as clocksource or avoiding
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 245) 		 * the WAIT instruction.  Until more details are known,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 246) 		 * disable the use of WAIT for 20Kc entirely.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 247) 		   cpu_wait = r4k_wait;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 248) 		 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 249) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 250) 	default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 251) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 252) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 253) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 254) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 255) void arch_cpu_idle(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 256) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 257) 	if (cpu_wait)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 258) 		cpu_wait();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 259) 	else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 260) 		raw_local_irq_enable();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 261) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 262) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 263) #ifdef CONFIG_CPU_IDLE
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 264) 
/*
 * cpuidle "enter" callback that simply reuses the architecture idle path.
 * @dev, @drv: unused here — arch_cpu_idle() needs no per-device state.
 * @index: the cpuidle state index; returned unchanged to report the
 *         state actually entered, as the cpuidle framework expects.
 */
int mips_cpuidle_wait_enter(struct cpuidle_device *dev,
			    struct cpuidle_driver *drv, int index)
{
	arch_cpu_idle();
	return index;
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 271) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 272) #endif