^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1) /* SPDX-License-Identifier: GPL-2.0-or-later */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3) * Copyright (C) 2012 Freescale Semiconductor, Inc.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6) #include <linux/threads.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7) #include <asm/epapr_hcalls.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8) #include <asm/reg.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9) #include <asm/page.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10) #include <asm/cputable.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11) #include <asm/thread_info.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12) #include <asm/ppc_asm.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13) #include <asm/asm-compat.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14) #include <asm/asm-offsets.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15) #include <asm/export.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 16)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 17) #ifndef CONFIG_PPC64
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 18) /* epapr_ev_idle() was derived from e500_idle() */
/*
 * epapr_ev_idle - idle loop for 32-bit ePAPR paravirtualized guests.
 *
 * Marks the current thread as napping (_TLF_NAPPING) so that when an
 * interrupt/exception arrives while we are idle, the exception exit path
 * returns directly to our caller (LR) instead of resuming the idle loop.
 * Clobbers: r4, r11, MSR[EE].
 * NOTE(review): r2 is used as the base for TI_LOCAL_FLAGS, i.e. it is
 * assumed to point at the current thread's flags area — confirm against
 * the 32-bit register conventions used elsewhere in this tree.
 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 19) _GLOBAL(epapr_ev_idle)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 20) PPC_LL r4, TI_LOCAL_FLAGS(r2) /* set napping bit */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 21) ori r4, r4,_TLF_NAPPING /* so when we take an exception */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 22) PPC_STL r4, TI_LOCAL_FLAGS(r2) /* it will return to our caller */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 23)
/* Enable external interrupts (MSR[EE]=1) so a wakeup event can reach us. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 24) wrteei 1
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 25)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 26) idle_loop:
/* r11 = hypercall token for EV_IDLE, per the ePAPR hypercall ABI. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 27) LOAD_REG_IMMEDIATE(r11, EV_HCALL_TOKEN(EV_IDLE))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 28)
/*
 * Patch site: li r3,-1 plus three nops. Presumably rewritten at boot with
 * the hypervisor's hcall instruction sequence (as epapr_hypercall_start
 * is, per its comment below) — TODO confirm. If never patched, r3 keeps
 * the -1 "unimplemented" default and we simply spin in idle_loop.
 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 29) .global epapr_ev_idle_start
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 30) epapr_ev_idle_start:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 31) li r3, -1
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 32) nop
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 33) nop
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 34) nop
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 35)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 36) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 37) * Guard against spurious wakeups from a hypervisor --
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 38) * only interrupt will cause us to return to LR due to
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 39) * _TLF_NAPPING.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 40) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 41) b idle_loop
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 42) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 43)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 44) /* Hypercall entry point. Will be patched with device tree instructions. */
/*
 * epapr_hypercall_start - generic ePAPR hypercall trampoline.
 *
 * Callers branch here to issue a hypercall; at boot the li/nop patch site
 * below is overwritten with the hypervisor-specific instruction sequence
 * (per the comment above, taken from the device tree). The four-instruction
 * slot size is load-bearing — do not add, remove, or reorder instructions.
 * If never patched, the stub returns -1 in r3 as a failure indication
 * (hedged: -1 likely maps to an ePAPR "unimplemented" status — confirm
 * against the ePAPR hypercall ABI). Exported so modules can use it.
 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 45) .global epapr_hypercall_start
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 46) epapr_hypercall_start:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 47) li r3, -1
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 48) nop
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 49) nop
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 50) nop
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 51) blr
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 52) EXPORT_SYMBOL(epapr_hypercall_start)