Orange Pi5 kernel

Deprecated Linux kernel 5.10.110 source tree for the Orange Pi 5 / 5B / 5 Plus boards

3 Commits   0 Branches   0 Tags
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   1) /* SPDX-License-Identifier: GPL-2.0-or-later */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   2) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   3)  *  This file contains the power_save function for 6xx & 7xxx CPUs
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   4)  *  rewritten in assembler
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   5)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   6)  *  Warning ! This code assumes that if your machine has a 750fx
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   7)  *  it will have PLL 1 set to low speed mode (used during NAP/DOZE).
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   8)  *  if this is not the case some additional changes will have to
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   9)  *  be done to check a runtime var (a bit like powersave-nap)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  10)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  11) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  12) #include <linux/threads.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  13) #include <asm/reg.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  14) #include <asm/page.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  15) #include <asm/cputable.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  16) #include <asm/thread_info.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  17) #include <asm/ppc_asm.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  18) #include <asm/asm-offsets.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  19) #include <asm/feature-fixups.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  20) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  21) 	.text
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  22) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  23) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  24)  * Init idle, called at early CPU setup time from head.S for each CPU
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  25)  * Make sure no rest of NAP mode remains in HID0, save default
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  26)  * values for some CPU specific registers. Called with r24
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  27)  * containing CPU number and r3 reloc offset
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  28)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  29) _GLOBAL(init_idle_6xx)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  30) BEGIN_FTR_SECTION
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  31) 	mfspr	r4,SPRN_HID0
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  32) 	rlwinm	r4,r4,0,10,8	/* Clear NAP */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  33) 	mtspr	SPRN_HID0, r4
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  34) 	b	1f
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  35) END_FTR_SECTION_IFSET(CPU_FTR_CAN_NAP)
	/* CPU cannot NAP: nothing to clear or save, plain return */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  36) 	blr
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  37) 1:
	/* r5 = reloc offset + CPU# * 4: per-CPU word offset used below to
	 * index the nap_save_msscr0/nap_save_hid1 arrays
	 * (r24 = CPU number, r3 = reloc offset, per the header comment)
	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  38) 	slwi	r5,r24,2
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  39) 	add	r5,r5,r3
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  40) BEGIN_FTR_SECTION
	/* Save boot-time MSSCR0 for this CPU; power_save_ppc32_restore
	 * reloads it after NAP wakeup on CPUs that disable L2 prefetch
	 * before napping
	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  41) 	mfspr	r4,SPRN_MSSCR0
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  42) 	addis	r6,r5, nap_save_msscr0@ha
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  43) 	stw	r4,nap_save_msscr0@l(r6)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  44) END_FTR_SECTION_IFSET(CPU_FTR_NAP_DISABLE_L2_PR)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  45) BEGIN_FTR_SECTION
	/* Save boot-time HID1 (PLL setup) for dual-PLL 750FX parts */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  46) 	mfspr	r4,SPRN_HID1
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  47) 	addis	r6,r5,nap_save_hid1@ha
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  48) 	stw	r4,nap_save_hid1@l(r6)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  49) END_FTR_SECTION_IFSET(CPU_FTR_DUAL_PLL_750FX)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  50) 	blr
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  51) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  52) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  53)  * Here is the power_save_6xx function. This could eventually be
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  54)  * split into several functions & changing the function pointer
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  55)  * depending on the various features.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  56)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  57) _GLOBAL(ppc6xx_idle)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  58) 	/* Check if we can nap or doze, put HID0 mask in r3
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  59) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  60) 	lis	r3, 0
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  61) BEGIN_FTR_SECTION
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  62) 	lis	r3,HID0_DOZE@h
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  63) END_FTR_SECTION_IFSET(CPU_FTR_CAN_DOZE)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  64) BEGIN_FTR_SECTION
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  65) 	/* We must dynamically check for the NAP feature as it
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  66) 	 * can be cleared by CPU init after the fixups are done
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  67) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  68) 	lis	r4,cur_cpu_spec@ha
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  69) 	lwz	r4,cur_cpu_spec@l(r4)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  70) 	lwz	r4,CPU_SPEC_FEATURES(r4)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  71) 	andi.	r0,r4,CPU_FTR_CAN_NAP
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  72) 	beq	1f
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  73) 	/* Now check if user or arch enabled NAP mode */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  74) 	lis	r4,powersave_nap@ha
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  75) 	lwz	r4,powersave_nap@l(r4)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  76) 	cmpwi	0,r4,0
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  77) 	beq	1f
	/* NAP overrides DOZE when both are possible */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  78) 	lis	r3,HID0_NAP@h
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  79) 1:	
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  80) END_FTR_SECTION_IFSET(CPU_FTR_CAN_NAP)
	/* r3 == 0: no supported/enabled power-save mode, just return */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  81) 	cmpwi	0,r3,0
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  82) 	beqlr
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  83) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  84) 	/* Some pre-nap cleanups needed on some CPUs */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  85) 	andis.	r0,r3,HID0_NAP@h
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  86) 	beq	2f
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  87) BEGIN_FTR_SECTION
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  88) 	/* Disable L2 prefetch on some 745x and try to ensure
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  89) 	 * L2 prefetch engines are idle. As explained by errata
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  90) 	 * text, we can't be sure they are, we just hope very hard
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  91) 	 * that well be enough (sic !). At least I noticed Apple
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  92) 	 * doesn't even bother doing the dcbf's here...
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  93) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  94) 	mfspr	r4,SPRN_MSSCR0
	/* keep MSSCR0 bits 0:29, clear bits 30:31 (presumably the L2
	 * prefetch enables on 745x -- confirm against the MPC7450 manual)
	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  95) 	rlwinm	r4,r4,0,0,29
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  96) 	sync
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  97) 	mtspr	SPRN_MSSCR0,r4
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  98) 	sync
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  99) 	isync
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 100) 	lis	r4,KERNELBASE@h
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 101) 	dcbf	0,r4
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 102) 	dcbf	0,r4
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 103) 	dcbf	0,r4
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 104) 	dcbf	0,r4
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 105) END_FTR_SECTION_IFSET(CPU_FTR_NAP_DISABLE_L2_PR)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 106) 2:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 107) BEGIN_FTR_SECTION
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 108) 	/* Go to low speed mode on some 750FX */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 109) 	lis	r4,powersave_lowspeed@ha
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 110) 	lwz	r4,powersave_lowspeed@l(r4)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 111) 	cmpwi	0,r4,0
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 112) 	beq	1f
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 113) 	mfspr	r4,SPRN_HID1
	/* set HID1 bit 0x00010000: switch to the second PLL, which the
	 * file header warns must already be programmed to low speed
	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 114) 	oris	r4,r4,0x0001
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 115) 	mtspr	SPRN_HID1,r4
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 116) 1:	
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 117) END_FTR_SECTION_IFSET(CPU_FTR_DUAL_PLL_750FX)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 118) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 119) 	/* Go to NAP or DOZE now */	
	/* Clear every power-save mode bit in HID0 first, then OR in the
	 * single mode selected in r3 above
	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 120) 	mfspr	r4,SPRN_HID0
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 121) 	lis	r5,(HID0_NAP|HID0_SLEEP)@h
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 122) BEGIN_FTR_SECTION
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 123) 	oris	r5,r5,HID0_DOZE@h
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 124) END_FTR_SECTION_IFSET(CPU_FTR_CAN_DOZE)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 125) 	andc	r4,r4,r5
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 126) 	or	r4,r4,r3
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 127) BEGIN_FTR_SECTION
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 128) 	oris	r4,r4,HID0_DPM@h	/* that should be done once for all  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 129) END_FTR_SECTION_IFCLR(CPU_FTR_NO_DPM)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 130) 	mtspr	SPRN_HID0,r4
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 131) BEGIN_FTR_SECTION
	/* DSSALL: stop all Altivec data streams before stopping the core */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 132) 	DSSALL
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 133) 	sync
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 134) END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 135) 	lwz	r8,TI_LOCAL_FLAGS(r2)	/* set napping bit */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 136) 	ori	r8,r8,_TLF_NAPPING	/* so when we take an exception */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 137) 	stw	r8,TI_LOCAL_FLAGS(r2)	/* it will return to our caller */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 138) 	mfmsr	r7
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 139) 	ori	r7,r7,MSR_EE
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 140) 	oris	r7,r7,MSR_POW@h
	/* Enter power-save: writing MSR with POW|EE set stops the core
	 * until an interrupt; the _TLF_NAPPING exception path returns to
	 * our caller, not here, so we only loop back on a wakeup that was
	 * not redirected and must re-enter power-save
	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 141) 1:	sync
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 142) 	mtmsr	r7
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 143) 	isync
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 144) 	b	1b
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 145) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 146) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 147)  * Return from NAP/DOZE mode, restore some CPU specific registers,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 148)  * we are called with DR/IR still off and r2 containing physical
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 149)  * address of current.  R11 points to the exception frame (physical
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 150)  * address).  We have to preserve r10.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 151)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 152) _GLOBAL(power_save_ppc32_restore)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 153) 	lwz	r9,_LINK(r11)		/* interrupted in ppc6xx_idle: */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 154) 	stw	r9,_NIP(r11)		/* make it do a blr */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 155) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 156) #ifdef CONFIG_SMP
	/* r11 = CPU# * 4: per-CPU word offset into the nap_save_* arrays */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 157) 	lwz	r11,TASK_CPU(r2)	/* get cpu number * 4 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 158) 	slwi	r11,r11,2
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 159) #else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 160) 	li	r11,0
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 161) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 162) 	/* Todo make sure all these are in the same page
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 163) 	 * and load r11 (@ha part + CPU offset) only once
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 164) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 165) BEGIN_FTR_SECTION
	/* Only restore MSSCR0 if we were actually in NAP (HID0[NAP] set) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 166) 	mfspr	r9,SPRN_HID0
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 167) 	andis.	r9,r9,HID0_NAP@h
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 168) 	beq	1f
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 169) #ifdef CONFIG_VMAP_STACK
	/* NOTE(review): with VMAP_STACK the plain virtual symbol address
	 * is used, while the non-VMAP path subtracts KERNELBASE to form a
	 * physical address (DR/IR are off here per the header comment) --
	 * this implies the VMAP_STACK path runs with translation on;
	 * confirm against the exception entry code
	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 170) 	addis	r9, r11, nap_save_msscr0@ha
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 171) #else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 172) 	addis	r9,r11,(nap_save_msscr0-KERNELBASE)@ha
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 173) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 174) 	lwz	r9,nap_save_msscr0@l(r9)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 175) 	mtspr	SPRN_MSSCR0, r9
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 176) 	sync
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 177) 	isync
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 178) 1:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 179) END_FTR_SECTION_IFSET(CPU_FTR_NAP_DISABLE_L2_PR)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 180) BEGIN_FTR_SECTION
	/* Restore the saved HID1 (PLL selection) on dual-PLL 750FX */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 181) #ifdef CONFIG_VMAP_STACK
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 182) 	addis	r9, r11, nap_save_hid1@ha
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 183) #else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 184) 	addis	r9,r11,(nap_save_hid1-KERNELBASE)@ha
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 185) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 186) 	lwz	r9,nap_save_hid1@l(r9)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 187) 	mtspr	SPRN_HID1, r9
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 188) END_FTR_SECTION_IFSET(CPU_FTR_DUAL_PLL_750FX)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 189) 	b	transfer_to_handler_cont
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 190) _ASM_NOKPROBE_SYMBOL(power_save_ppc32_restore)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 191) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 192) 	.data
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 193) 
/* Per-CPU saved MSSCR0 values (one 32-bit word per CPU), written by
 * init_idle_6xx at early setup and restored after NAP wakeup
 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 194) _GLOBAL(nap_save_msscr0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 195) 	.space	4*NR_CPUS
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 196) 
/* Per-CPU saved HID1 values (PLL configuration on dual-PLL 750FX) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 197) _GLOBAL(nap_save_hid1)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 198) 	.space	4*NR_CPUS
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 199) 
/* Flag read by ppc6xx_idle (set elsewhere): non-zero makes the 750FX
 * switch to the low-speed PLL before entering NAP/DOZE
 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 200) _GLOBAL(powersave_lowspeed)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 201) 	.long	0