Orange Pi 5 kernel

Deprecated Linux kernel 5.10.110 for OrangePi 5/5B/5+ boards

3 Commits   0 Branches   0 Tags
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   1) /* SPDX-License-Identifier: GPL-2.0-or-later */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   2) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   3)  * Copyright 2016,2017 IBM Corporation.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   4)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   5) #ifndef _ASM_POWERPC_XIVE_H
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   6) #define _ASM_POWERPC_XIVE_H
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   7) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   8) #include <asm/opal-api.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   9) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  10) #define XIVE_INVALID_VP	0xffffffff
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  11) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  12) #ifdef CONFIG_PPC_XIVE
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  13) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  14) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  15)  * Thread Interrupt Management Area (TIMA)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  16)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  17)  * This is a global MMIO region divided in 4 pages of varying access
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  18)  * permissions, providing access to per-cpu interrupt management
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  19)  * functions. It always identifies the CPU doing the access based
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  20)  * on the PowerBus initiator ID, thus we always access via the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  21)  * same offset regardless of where the code is executing
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  22)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  23) extern void __iomem *xive_tima;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  24) extern unsigned long xive_tima_os;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  25) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  26) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  27)  * Offset in the TM area of our current execution level (provided by
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  28)  * the backend)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  29)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  30) extern u32 xive_tima_offset;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  31) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  32) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  33)  * Per-irq data (irq_get_handler_data for normal IRQs), IPIs
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  34)  * have it stored in the xive_cpu structure. We also cache
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  35)  * for normal interrupts the current target CPU.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  36)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  37)  * This structure is setup by the backend for each interrupt.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  38)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  39) struct xive_irq_data {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  40) 	u64 flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  41) 	u64 eoi_page;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  42) 	void __iomem *eoi_mmio;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  43) 	u64 trig_page;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  44) 	void __iomem *trig_mmio;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  45) 	u32 esb_shift;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  46) 	int src_chip;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  47) 	u32 hw_irq;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  48) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  49) 	/* Setup/used by frontend */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  50) 	int target;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  51) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  52) 	 * saved_p means that there is a queue entry for this interrupt
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  53) 	 * in some CPU's queue (not including guest vcpu queues), even
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  54) 	 * if P is not set in the source ESB.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  55) 	 * stale_p means that there is no queue entry for this interrupt
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  56) 	 * in some CPU's queue, even if P is set in the source ESB.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  57) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  58) 	bool saved_p;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  59) 	bool stale_p;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  60) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  61) #define XIVE_IRQ_FLAG_STORE_EOI	0x01
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  62) #define XIVE_IRQ_FLAG_LSI	0x02
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  63) #define XIVE_IRQ_FLAG_SHIFT_BUG	0x04
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  64) #define XIVE_IRQ_FLAG_MASK_FW	0x08
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  65) #define XIVE_IRQ_FLAG_EOI_FW	0x10
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  66) #define XIVE_IRQ_FLAG_H_INT_ESB	0x20
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  67) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  68) /* Special flag set by KVM for escalation interrupts */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  69) #define XIVE_IRQ_NO_EOI		0x80
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  70) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  71) #define XIVE_INVALID_CHIP_ID	-1
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  72) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  73) /* A queue tracking structure in a CPU */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  74) struct xive_q {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  75) 	__be32 			*qpage;		/* Queue page: array of big-endian 32-bit entries */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  76) 	u32			msk;		/* Index mask — presumably (queue entries - 1); verify against backend */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  77) 	u32			idx;		/* Current read index into qpage */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  78) 	u32			toggle;		/* Generation/valid-bit toggle for queue wrap-around — TODO confirm */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  79) 	u64			eoi_phys;	/* Physical address of the queue EOI page — NOTE(review): inferred from name */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  80) 	u32			esc_irq;	/* Escalation interrupt number (see xive_native_configure_queue can_escalate) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  81) 	atomic_t		count;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  82) 	atomic_t		pending_count;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  83) 	u64			guest_qaddr;	/* Guest-provided queue address — presumably used by KVM; verify */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  84) 	u32			guest_qshift;	/* Guest-provided queue size shift — presumably used by KVM; verify */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  85) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  86) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  87) /* Global enable flags for the XIVE support */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  88) extern bool __xive_enabled;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  89) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  90) static inline bool xive_enabled(void) { return __xive_enabled; }	/* Reads the global XIVE enable flag */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  91) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  92) bool xive_spapr_init(void);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  93) bool xive_native_init(void);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  94) void xive_smp_probe(void);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  95) int  xive_smp_prepare_cpu(unsigned int cpu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  96) void xive_smp_setup_cpu(void);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  97) void xive_smp_disable_cpu(void);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  98) void xive_teardown_cpu(void);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  99) void xive_shutdown(void);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 100) void xive_flush_interrupt(void);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 101) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 102) /* xmon hook */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 103) void xmon_xive_do_dump(int cpu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 104) int xmon_xive_get_irq_config(u32 hw_irq, struct irq_data *d);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 105) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 106) /* APIs used by KVM */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 107) u32 xive_native_default_eq_shift(void);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 108) u32 xive_native_alloc_vp_block(u32 max_vcpus);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 109) void xive_native_free_vp_block(u32 vp_base);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 110) int xive_native_populate_irq_data(u32 hw_irq,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 111) 				  struct xive_irq_data *data);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 112) void xive_cleanup_irq_data(struct xive_irq_data *xd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 113) void xive_native_free_irq(u32 irq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 114) int xive_native_configure_irq(u32 hw_irq, u32 target, u8 prio, u32 sw_irq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 115) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 116) int xive_native_configure_queue(u32 vp_id, struct xive_q *q, u8 prio,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 117) 				__be32 *qpage, u32 order, bool can_escalate);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 118) void xive_native_disable_queue(u32 vp_id, struct xive_q *q, u8 prio);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 119) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 120) void xive_native_sync_source(u32 hw_irq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 121) void xive_native_sync_queue(u32 hw_irq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 122) bool is_xive_irq(struct irq_chip *chip);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 123) int xive_native_enable_vp(u32 vp_id, bool single_escalation);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 124) int xive_native_disable_vp(u32 vp_id);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 125) int xive_native_get_vp_info(u32 vp_id, u32 *out_cam_id, u32 *out_chip_id);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 126) bool xive_native_has_single_escalation(void);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 127) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 128) int xive_native_get_queue_info(u32 vp_id, uint32_t prio,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 129) 			       u64 *out_qpage,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 130) 			       u64 *out_qsize,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 131) 			       u64 *out_qeoi_page,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 132) 			       u32 *out_escalate_irq,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 133) 			       u64 *out_qflags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 134) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 135) int xive_native_get_queue_state(u32 vp_id, uint32_t prio, u32 *qtoggle,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 136) 				u32 *qindex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 137) int xive_native_set_queue_state(u32 vp_id, uint32_t prio, u32 qtoggle,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 138) 				u32 qindex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 139) int xive_native_get_vp_state(u32 vp_id, u64 *out_state);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 140) bool xive_native_has_queue_state_support(void);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 141) extern u32 xive_native_alloc_irq_on_chip(u32 chip_id);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 142) 
/* Allocate a hardware interrupt without a chip-placement preference. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 143) static inline u32 xive_native_alloc_irq(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 144) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 145) 	return xive_native_alloc_irq_on_chip(OPAL_XIVE_ANY_CHIP);	/* any chip is acceptable */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 146) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 147) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 148) #else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 149) 
/*
 * CONFIG_PPC_XIVE=n: inert stubs so callers compile and link without
 * XIVE support. Init functions report failure/absence; setup/teardown
 * hooks are no-ops; VP block allocation reports XIVE_INVALID_VP.
 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 150) static inline bool xive_enabled(void) { return false; }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 151) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 152) static inline bool xive_spapr_init(void) { return false; }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 153) static inline bool xive_native_init(void) { return false; }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 154) static inline void xive_smp_probe(void) { }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 155) static inline int  xive_smp_prepare_cpu(unsigned int cpu) { return -EINVAL; }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 156) static inline void xive_smp_setup_cpu(void) { }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 157) static inline void xive_smp_disable_cpu(void) { }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 158) static inline void xive_shutdown(void) { }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 159) static inline void xive_flush_interrupt(void) { }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 160) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 161) static inline u32 xive_native_alloc_vp_block(u32 max_vcpus) { return XIVE_INVALID_VP; }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 162) static inline void xive_native_free_vp_block(u32 vp_base) { }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 163) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 164) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 165) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 166) #endif /* _ASM_POWERPC_XIVE_H */