^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1) /* SPDX-License-Identifier: GPL-2.0-or-later */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3) * This control block defines the PACA which defines the processor
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4) * specific data for each logical processor on the system.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5) * There are some pointers defined that are utilized by PLIC.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6) *
 * Copyright (C) 2001 PPC 64 Team, IBM Corp
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9) #ifndef _ASM_POWERPC_PACA_H
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10) #define _ASM_POWERPC_PACA_H
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11) #ifdef __KERNEL__
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13) #ifdef CONFIG_PPC64
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15) #include <linux/string.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 16) #include <asm/types.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 17) #include <asm/lppaca.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 18) #include <asm/mmu.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 19) #include <asm/page.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 20) #ifdef CONFIG_PPC_BOOK3E
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 21) #include <asm/exception-64e.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 22) #else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 23) #include <asm/exception-64s.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 24) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 25) #ifdef CONFIG_KVM_BOOK3S_64_HANDLER
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 26) #include <asm/kvm_book3s_asm.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 27) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 28) #include <asm/accounting.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 29) #include <asm/hmi.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 30) #include <asm/cpuidle.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 31) #include <asm/atomic.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 32)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 33) #include <asm-generic/mmiowb_types.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 34)
/*
 * local_paca is a global register variable pinned to GPR r13, so it always
 * refers to the current CPU's paca_struct without any memory load.
 */
register struct paca_struct *local_paca asm("r13");

#if defined(CONFIG_DEBUG_PREEMPT) && defined(CONFIG_SMP)
extern unsigned int debug_smp_processor_id(void); /* from linux/smp.h */
/*
 * Add standard checks that preemption cannot occur when using get_paca():
 * otherwise the paca_struct it points to may be the wrong one just after.
 * debug_smp_processor_id() performs the preemption check; its return value
 * is discarded and local_paca is yielded via the comma operator.
 */
#define get_paca()	((void) debug_smp_processor_id(), local_paca)
#else
#define get_paca()	local_paca
#endif

/* Shared-processor area exposed by the hypervisor, pseries only. */
#ifdef CONFIG_PPC_PSERIES
#define get_lppaca()	(get_paca()->lppaca_ptr)
#endif

/* Current CPU's SLB shadow buffer (see slb_shadow_ptr in paca_struct). */
#define get_slb_shadow()	(get_paca()->slb_shadow_ptr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 53)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 54) struct task_struct;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 55) struct rtas_args;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 56)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 57) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 58) * Defines the layout of the paca.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 59) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 60) * This structure is not directly accessed by firmware or the service
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 61) * processor.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 62) */
struct paca_struct {
#ifdef CONFIG_PPC_PSERIES
	/*
	 * Because hw_cpu_id, unlike other paca fields, is accessed
	 * routinely from other CPUs (from the IRQ code), we stick to
	 * read-only (after boot) fields in the first cacheline to
	 * avoid cacheline bouncing.
	 */

	struct lppaca *lppaca_ptr;	/* Pointer to LpPaca for PLIC */
#endif /* CONFIG_PPC_PSERIES */

	/*
	 * MAGIC: the spinlock functions in arch/powerpc/lib/locks.c
	 * load lock_token and paca_index with a single lwz
	 * instruction. They must travel together and be properly
	 * aligned.
	 */
#ifdef __BIG_ENDIAN__
	u16 lock_token;			/* Constant 0x8000, used in locks */
	u16 paca_index;			/* Logical processor number */
#else
	u16 paca_index;			/* Logical processor number */
	u16 lock_token;			/* Constant 0x8000, used in locks */
#endif

	u64 kernel_toc;			/* Kernel TOC address */
	u64 kernelbase;			/* Base address of kernel */
	u64 kernel_msr;			/* MSR while running in kernel */
	void *emergency_sp;		/* pointer to emergency stack */
	u64 data_offset;		/* per cpu data offset */
	s16 hw_cpu_id;			/* Physical processor number */
	u8 cpu_start;			/* At startup, processor spins until */
					/* this becomes non-zero. */
	u8 kexec_state;		/* set when kexec down has irqs off */
#ifdef CONFIG_PPC_BOOK3S_64
	/* Buffer read by get_slb_shadow(); contents maintained for the
	 * hypervisor's benefit — NOTE(review): assumption, confirm. */
	struct slb_shadow *slb_shadow_ptr;
	/* Dispatch trace log ring buffer [dispatch_log, dispatch_log_end);
	 * dtl_ridx/dtl_curr below track the read position — presumably;
	 * verify against the DTL consumers. */
	struct dtl_entry *dispatch_log;
	struct dtl_entry *dispatch_log_end;
#endif
	u64 dscr_default;		/* per-CPU default DSCR */

#ifdef CONFIG_PPC_BOOK3S_64
	/*
	 * Now, starting in cacheline 2, the exception save areas
	 */
	/* used for most interrupts/exceptions */
	u64 exgen[EX_SIZE] __attribute__((aligned(0x80)));
	u64 exslb[EX_SIZE];	/* used for SLB/segment table misses
				 * on the linear mapping */
	/* SLB related definitions */
	u16 vmalloc_sllp;		/* SLB sllp encoding for the vmalloc
					 * region — TODO confirm */
	u8 slb_cache_ptr;		/* presumably the count of valid
					 * slb_cache[] entries; verify */
	u8 stab_rr;			/* stab/slb round-robin counter */
#ifdef CONFIG_DEBUG_VM
	u8 in_kernel_slb_handler;	/* debug flag: inside the SLB handler */
#endif
	u32 slb_used_bitmap;		/* Bitmaps for first 32 SLB entries. */
	u32 slb_kern_bitmap;		/* kernel-entry subset of the above —
					 * assumption from the name, confirm */
	u32 slb_cache[SLB_CACHE_ENTRIES];	/* recently-used SLB entries
						 * (ESIDs?) — verify */
#endif /* CONFIG_PPC_BOOK3S_64 */

#ifdef CONFIG_PPC_BOOK3E
	u64 exgen[8] __aligned(0x40);
	/* Keep pgd in the same cacheline as the start of extlb */
	pgd_t *pgd __aligned(0x40); /* Current PGD */
	pgd_t *kernel_pgd;		/* Kernel PGD */

	/* Shared by all threads of a core -- points to tcd of first thread */
	struct tlb_core_data *tcd_ptr;

	/*
	 * We can have up to 3 levels of reentrancy in the TLB miss handler,
	 * in each of four exception levels (normal, crit, mcheck, debug).
	 */
	u64 extlb[12][EX_TLB_SIZE / sizeof(u64)];
	u64 exmc[8];		/* used for machine checks */
	u64 excrit[8];		/* used for crit interrupts */
	u64 exdbg[8];		/* used for debug interrupts */

	/* Kernel stack pointers for use by special exceptions */
	void *mc_kstack;
	void *crit_kstack;
	void *dbg_kstack;

	/* This thread's copy of the core TLB data; tcd_ptr on thread 0
	 * presumably points here — confirm against setup code. */
	struct tlb_core_data tcd;
#endif /* CONFIG_PPC_BOOK3E */

#ifdef CONFIG_PPC_BOOK3S
	/* Cached fields of the current mm's context, for fast access from
	 * low-level fault/SLB handlers (see copy_mm_to_paca()). */
	mm_context_id_t mm_ctx_id;
#ifdef CONFIG_PPC_MM_SLICES
	unsigned char mm_ctx_low_slices_psize[BITS_PER_LONG / BITS_PER_BYTE];
	unsigned char mm_ctx_high_slices_psize[SLICE_ARRAY_SIZE];
	unsigned long mm_ctx_slb_addr_limit;
#else
	u16 mm_ctx_user_psize;
	u16 mm_ctx_sllp;
#endif
#endif

	/*
	 * then miscellaneous read-write fields
	 */
	struct task_struct *__current;	/* Pointer to current */
	u64 kstack;			/* Saved Kernel stack addr */
	u64 saved_r1;			/* r1 save for RTAS calls or PM or EE=0 */
	u64 saved_msr;			/* MSR saved here by enter_rtas */
#ifdef CONFIG_PPC_BOOK3E
	u16 trap_save;			/* Used when bad stack is encountered */
#endif
	u8 irq_soft_mask;		/* mask for irq soft masking */
	u8 irq_happened;		/* irq happened while soft-disabled */
	u8 irq_work_pending;		/* IRQ_WORK interrupt while soft-disable */
#ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
	u8 pmcregs_in_use;		/* pseries puts this in lppaca */
#endif
	u64 sprg_vdso;			/* Saved user-visible sprg */
#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
	u64 tm_scratch;			/* TM scratch area for reclaim */
#endif

#ifdef CONFIG_PPC_POWERNV
	/* PowerNV idle fields */
	/* PNV_CORE_IDLE_* bits, all siblings work on thread 0 paca */
	unsigned long idle_state;
	union {
		/* P7/P8 specific fields */
		struct {
			/* PNV_THREAD_RUNNING/NAP/SLEEP */
			u8 thread_idle_state;
			/* Mask to denote subcore sibling threads */
			u8 subcore_sibling_mask;
		};

		/* P9 specific fields */
		struct {
#ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
			/* The PSSCR value that the kernel requested before going to stop */
			u64 requested_psscr;
			/* Flag to request this thread not to stop */
			atomic_t dont_stop;
#endif
		};
	};
#endif

#ifdef CONFIG_PPC_BOOK3S_64
	/* Non-maskable exceptions that are not performance critical */
	u64 exnmi[EX_SIZE];	/* used for system reset (nmi) */
	u64 exmc[EX_SIZE];	/* used for machine checks */
#endif
#ifdef CONFIG_PPC_BOOK3S_64
	/* Exclusive stacks for system reset and machine check exception. */
	void *nmi_emergency_sp;
	void *mc_emergency_sp;

	u16 in_nmi;			/* In nmi handler */

	/*
	 * Flag to check whether we are in machine check early handler
	 * and already using emergency stack.
	 */
	u16 in_mce;
	u8 hmi_event_available;		/* HMI event is available */
	u8 hmi_p9_special_emu;		/* HMI P9 special emulation */
	u32 hmi_irqs;			/* HMI irq stat */
#endif
	u8 ftrace_enabled;		/* Hard disable ftrace */

	/* Stuff for accurate time accounting */
	struct cpu_accounting_data accounting;
	u64 dtl_ridx;			/* read index in dispatch log */
	struct dtl_entry *dtl_curr;	/* pointer corresponding to dtl_ridx */

#ifdef CONFIG_KVM_BOOK3S_HANDLER
#ifdef CONFIG_KVM_BOOK3S_PR_POSSIBLE
	/* We use this to store guest state in */
	struct kvmppc_book3s_shadow_vcpu shadow_vcpu;
#endif
	struct kvmppc_host_state kvm_hstate;
#ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
	/*
	 * Bitmap for sibling subcore status. See kvm/book3s_hv_ras.c for
	 * more details
	 */
	struct sibling_subcore_state *sibling_subcore_state;
#endif
#endif
#ifdef CONFIG_PPC_BOOK3S_64
	/*
	 * rfi fallback flush must be in its own cacheline to prevent
	 * other paca data leaking into the L1d
	 */
	u64 exrfi[EX_SIZE] __aligned(0x80);
	void *rfi_flush_fallback_area;
	u64 l1d_flush_size;
#endif
#ifdef CONFIG_PPC_PSERIES
	/* Per-CPU RTAS argument buffer — presumably for reentrant RTAS
	 * calls (e.g. from NMI context); confirm against rtas.c. */
	struct rtas_args *rtas_args_reentrant;
	u8 *mce_data_buf;		/* buffer to hold per cpu rtas errlog */
#endif /* CONFIG_PPC_PSERIES */

#ifdef CONFIG_PPC_BOOK3S_64
	/* Capture SLB related old contents in MCE handler. */
	struct slb_entry *mce_faulty_slbs;
	u16 slb_save_cache_ptr;		/* saved slb_cache_ptr for the MCE
					 * handler — assumption, verify */
#endif /* CONFIG_PPC_BOOK3S_64 */
#ifdef CONFIG_STACKPROTECTOR
	unsigned long canary;		/* per-CPU stack-protector canary */
#endif
#ifdef CONFIG_MMIOWB
	struct mmiowb_state mmiowb_state;	/* generic mmiowb tracking state */
#endif
} ____cacheline_aligned;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 277)
/* Cache fields of mm's context into the current CPU's paca. */
extern void copy_mm_to_paca(struct mm_struct *mm);
/* Array of pointers to each CPU's paca, indexed by logical CPU number. */
extern struct paca_struct **paca_ptrs;
/* Fill in a (possibly boot-time static) paca for the given logical CPU. */
extern void initialise_paca(struct paca_struct *new_paca, int cpu);
/* Install new_paca as the current CPU's paca (sets up r13). */
extern void setup_paca(struct paca_struct *new_paca);
/* Allocate the paca_ptrs array itself. */
extern void allocate_paca_ptrs(void);
/* Allocate and initialise the paca for one CPU. */
extern void allocate_paca(int cpu);
/* Release memory reserved for pacas of CPUs that never came online. */
extern void free_unused_pacas(void);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 285)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 286) #else /* CONFIG_PPC64 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 287)
/*
 * 32-bit kernels have no paca, so paca setup is a no-op. Providing empty
 * stubs lets common code call these unconditionally.
 *
 * Note: no semicolon after the function bodies — a trailing ';' after a
 * function definition is a spurious empty declaration (warns with -Wpedantic).
 */
static inline void allocate_paca_ptrs(void) { }
static inline void allocate_paca(int cpu) { }
static inline void free_unused_pacas(void) { }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 291)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 292) #endif /* CONFIG_PPC64 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 293)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 294) #endif /* __KERNEL__ */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 295) #endif /* _ASM_POWERPC_PACA_H */