^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1) /* SPDX-License-Identifier: GPL-2.0-or-later */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3) * Copyright (C) 2008 Freescale Semiconductor, Inc. All rights reserved.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4) * Dave Liu <daveliu@freescale.com>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5) * copy from idle_6xx.S and modify for e500 based processor,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6) * implement the power_save function in idle.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9) #include <linux/threads.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10) #include <asm/reg.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11) #include <asm/page.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12) #include <asm/cputable.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13) #include <asm/thread_info.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14) #include <asm/ppc_asm.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15) #include <asm/asm-offsets.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 16) #include <asm/feature-fixups.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 17)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 18) .text
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 19)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 20) _GLOBAL(e500_idle)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 21) 	lwz	r4,TI_LOCAL_FLAGS(r2)	/* set napping bit */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 22) 	ori	r4,r4,_TLF_NAPPING	/* so when we take an exception */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 23) 	stw	r4,TI_LOCAL_FLAGS(r2)	/* it will return to our caller */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 24) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 25) #ifdef CONFIG_PPC_E500MC
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 26) 	wrteei	1	/* enable external interrupts so "wait" can be woken */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 27) 1:	wait	/* e500mc: stop fetching until an interrupt occurs */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 28) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 29) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 30) 	 * Guard against spurious wakeups (e.g. from a hypervisor) --
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 31) 	 * any real interrupt will cause us to return to LR due to
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 32) 	 * _TLF_NAPPING.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 33) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 34) 	b	1b
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 35) #else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 36) 	/* Check if we can nap or doze, put HID0 mask in r3 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 37) 	lis	r3,0	/* default: no power-save HID0 bit selected */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 38) BEGIN_FTR_SECTION
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 39) 	lis	r3,HID0_DOZE@h
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 40) END_FTR_SECTION_IFSET(CPU_FTR_CAN_DOZE)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 41) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 42) BEGIN_FTR_SECTION
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 43) 	/* Now check if user enabled NAP mode */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 44) 	lis	r4,powersave_nap@ha
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 45) 	lwz	r4,powersave_nap@l(r4)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 46) 	cmpwi	0,r4,0
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 47) 	beq	1f	/* nap disabled: keep doze mask (or none) in r3 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 48) 	stwu	r1,-16(r1)	/* build a small frame so we can call out */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 49) 	mflr	r0
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 50) 	stw	r0,20(r1)	/* save LR in caller's LR save slot */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 51) 	bl	flush_dcache_L1	/* flush L1 D-cache before entering nap */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 52) 	lwz	r0,20(r1)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 53) 	addi	r1,r1,16
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 54) 	mtlr	r0	/* restore LR for the _TLF_NAPPING return path */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 55) 	lis	r3,HID0_NAP@h
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 56) END_FTR_SECTION_IFSET(CPU_FTR_CAN_NAP)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 57) 1:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 58) 	/* Go to NAP or DOZE now */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 59) 	mfspr	r4,SPRN_HID0
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 60) 	rlwinm	r4,r4,0,~(HID0_DOZE|HID0_NAP|HID0_SLEEP)	/* clear all power-save bits first */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 61) 	or	r4,r4,r3	/* then set the mode chosen above */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 62) 	isync	/* context-synchronize around the HID0 update */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 63) 	mtspr	SPRN_HID0,r4
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 64) 	isync
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 65) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 66) 	mfmsr	r7
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 67) 	oris	r7,r7,MSR_WE@h	/* MSR[WE]: enter power-save state on mtmsr */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 68) 	ori	r7,r7,MSR_EE	/* interrupts enabled so something can wake us */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 69) 	msync	/* drain pending memory traffic before sleeping */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 70) 	mtmsr	r7	/* sleep happens here */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 71) 	isync
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 72) 2:	b	2b	/* spin on spurious wakeup; a real interrupt returns to LR via _TLF_NAPPING */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 73) #endif /* !E500MC */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 74)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 75) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 76) * Return from NAP/DOZE mode, restore some CPU specific registers,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 77) * r2 containing physical address of current.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 78) * r11 points to the exception frame (physical address).
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 79) * We have to preserve r10.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 80) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 81) _GLOBAL(power_save_ppc32_restore)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 82) 	lwz	r9,_LINK(r11)		/* interrupted in e500_idle */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 83) 	stw	r9,_NIP(r11)		/* make it do a blr */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 84) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 85) #ifdef CONFIG_SMP
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 86) 	lwz	r11,TASK_CPU(r2)	/* get cpu number * 4 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 87) 	slwi	r11,r11,2	/* scale by 4; NOTE(review): presumably a per-CPU table index expected by transfer_to_handler_cont -- confirm */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 88) #else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 89) 	li	r11,0	/* UP: CPU 0 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 90) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 91) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 92) 	b	transfer_to_handler_cont	/* resume normal exception entry with NIP fixed up above */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 93) _ASM_NOKPROBE_SYMBOL(power_save_ppc32_restore)