Orange Pi5 kernel

Deprecated Linux kernel 5.10.110 for OrangePi 5/5B/5+ boards

3 Commits   0 Branches   0 Tags
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   1) /* SPDX-License-Identifier: GPL-2.0-only */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   2) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   3)  * Copyright 2017 Benjamin Herrenschmidt, IBM Corporation
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   4)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   5) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   6) #ifndef _KVM_PPC_BOOK3S_XIVE_H
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   7) #define _KVM_PPC_BOOK3S_XIVE_H
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   8) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   9) #ifdef CONFIG_KVM_XICS
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  10) #include "book3s_xics.h"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  11) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  12) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  13)  * The XIVE Interrupt source numbers are within the range 0 to
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  14)  * KVMPPC_XICS_NR_IRQS.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  15)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  16) #define KVMPPC_XIVE_FIRST_IRQ	0
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  17) #define KVMPPC_XIVE_NR_IRQS	KVMPPC_XICS_NR_IRQS
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  18) 
/*
 * State for one guest irq source.
 *
 * For each guest source we allocate a HW interrupt in the XIVE
 * which we use for all SW triggers. It will be unused for
 * pass-through but it's easier to keep around as the same
 * guest interrupt can alternatively be emulated or pass-through
 * if a physical device is hot unplugged and replaced with an
 * emulated one.
 *
 * This state structure is very similar to the XICS one with
 * additional XIVE specific tracking.
 */
struct kvmppc_xive_irq_state {
	bool valid;			/* Interrupt entry is valid */

	u32 number;			/* Guest IRQ number */
	u32 ipi_number;			/* XIVE IPI HW number */
	struct xive_irq_data ipi_data;	/* XIVE IPI associated data */
	u32 pt_number;			/* XIVE Pass-through number if any (0 = none) */
	struct xive_irq_data *pt_data;	/* XIVE Pass-through associated data */

	/* Targeting as set by guest */
	u8 guest_priority;		/* Guest set priority */
	u8 saved_priority;		/* Saved priority when masking */

	/* Actual targeting */
	u32 act_server;			/* Actual server */
	u8 act_priority;		/* Actual priority */

	/* Various state bits */
	bool in_eoi;			/* Synchronize with H_EOI */
	bool old_p;			/* P bit state when masking */
	bool old_q;			/* Q bit state when masking */
	bool lsi;			/* level-sensitive interrupt */
	bool asserted;			/* Only for emulated LSI: current state */

	/* Saved for migration state (lazy queue scan, see kvmppc_xive) */
	bool in_queue;			/* Entry was found in an EQ during the scan */
	bool saved_p;			/* P bit captured at scan time */
	bool saved_q;			/* Q bit captured at scan time */
	u8 saved_scan_prio;		/* Priority the entry was scanned at — confirm in xive.c */

	/* Xive native */
	u32 eisn;			/* Guest Effective IRQ number */
};
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  65) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  66) /* Select the "right" interrupt (IPI vs. passthrough) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  67) static inline void kvmppc_xive_select_irq(struct kvmppc_xive_irq_state *state,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  68) 					  u32 *out_hw_irq,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  69) 					  struct xive_irq_data **out_xd)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  70) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  71) 	if (state->pt_number) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  72) 		if (out_hw_irq)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  73) 			*out_hw_irq = state->pt_number;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  74) 		if (out_xd)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  75) 			*out_xd = state->pt_data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  76) 	} else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  77) 		if (out_hw_irq)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  78) 			*out_hw_irq = state->ipi_number;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  79) 		if (out_xd)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  80) 			*out_xd = &state->ipi_data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  81) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  82) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  83) 
/*
 * This corresponds to an "ICS" in XICS terminology, we use it
 * as a mean to break up source information into multiple structures.
 */
struct kvmppc_xive_src_block {
	arch_spinlock_t lock;		/* Serializes updates to this block's state (raw arch lock — presumably real-mode safe; confirm against users) */
	u16 id;				/* Block index: guest irq >> KVMPPC_XICS_ICS_SHIFT (see kvmppc_xive_find_source) */
	struct kvmppc_xive_irq_state irq_state[KVMPPC_XICS_IRQ_PER_ICS];
};
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  93) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  94) struct kvmppc_xive;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  95) 
/* Per-mode operations (XICS-over-XIVE vs. XIVE native, see below) */
struct kvmppc_xive_ops {
	/* Re-establish the mapping of a guest irq after a pass-through reset — confirm semantics at call sites */
	int (*reset_mapped)(struct kvm *kvm, unsigned long guest_irq);
};
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  99) 
/* Per-VM state of the in-kernel XIVE irq controller device */
struct kvmppc_xive {
	struct kvm *kvm;		/* The VM this device belongs to */
	struct kvm_device *dev;		/* Backing KVM device */
	struct dentry *dentry;		/* debugfs entry — confirm */

	/* VP block associated with the VM */
	u32	vp_base;

	/* Blocks of sources, indexed by block id (may be sparse/NULL) */
	struct kvmppc_xive_src_block *src_blocks[KVMPPC_XICS_MAX_ICS_ID + 1];
	u32	max_sbid;		/* Highest block id in use */

	/*
	 * For state save, we lazily scan the queues on the first interrupt
	 * being migrated. We don't have a clean way to reset that flag
	 * so we keep track of the number of valid sources and how many of
	 * them were migrated so we can reset when all of them have been
	 * processed.
	 */
	u32	src_count;
	u32	saved_src_count;

	/*
	 * Some irqs are delayed on restore until the source is created,
	 * keep track here of how many of them
	 */
	u32	delayed_irqs;

	/* Which queues (priorities) are in use by the guest */
	u8	qmap;

	/* Queue orders */
	u32	q_order;
	u32	q_page_order;

	/* Flags */
	u8	single_escalation;

	/* Number of entries in the VP block */
	u32	nr_servers;

	struct kvmppc_xive_ops *ops;	/* Mode specific callbacks */
	struct address_space   *mapping;	/* mmap address space — presumably for ESB pages; confirm */
	struct mutex mapping_lock;	/* Protects @mapping */
	struct mutex lock;		/* Main device lock */
};
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 146) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 147) #define KVMPPC_XIVE_Q_COUNT	8
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 148) 
/* Per-vCPU state of the in-kernel XIVE irq controller device */
struct kvmppc_xive_vcpu {
	struct kvmppc_xive	*xive;	/* Owning device */
	struct kvm_vcpu		*vcpu;	/* Backlink to the vcpu */
	bool			valid;	/* Entry is in use */

	/* Server number. This is the HW CPU ID from a guest perspective */
	u32			server_num;

	/*
	 * HW VP corresponding to this VCPU. This is the base of the VP
	 * block plus the server number.
	 */
	u32			vp_id;
	u32			vp_chip_id;	/* Chip the VP lives on — confirm */
	u32			vp_cam;		/* VP CAM line value — confirm */

	/* IPI used for sending ... IPIs */
	u32			vp_ipi;
	struct xive_irq_data	vp_ipi_data;

	/* Local emulation state */
	uint8_t			cppr;	/* guest CPPR */
	uint8_t			hw_cppr;/* Hardware CPPR */
	uint8_t			mfrr;	/* guest MFRR (IPI priority) */
	uint8_t			pending;	/* pending irq indication — presumably per-priority bits; confirm */

	/* Each VP has 8 queues though we only provision some */
	struct xive_q		queues[KVMPPC_XIVE_Q_COUNT];
	u32			esc_virq[KVMPPC_XIVE_Q_COUNT];	/* escalation virtual irqs, one per queue */
	char			*esc_virq_names[KVMPPC_XIVE_Q_COUNT];	/* allocated names for the above */

	/* Stash a delayed irq on restore from migration (see set_icp) */
	u32			delayed_irq;

	/* Stats: real-mode (rm) vs. virtual-mode (vm) hcall counters */
	u64			stat_rm_h_xirr;
	u64			stat_rm_h_ipoll;
	u64			stat_rm_h_cppr;
	u64			stat_rm_h_eoi;
	u64			stat_rm_h_ipi;
	u64			stat_vm_h_xirr;
	u64			stat_vm_h_ipoll;
	u64			stat_vm_h_cppr;
	u64			stat_vm_h_eoi;
	u64			stat_vm_h_ipi;
};
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 195) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 196) static inline struct kvm_vcpu *kvmppc_xive_find_server(struct kvm *kvm, u32 nr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 197) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 198) 	struct kvm_vcpu *vcpu = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 199) 	int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 200) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 201) 	kvm_for_each_vcpu(i, vcpu, kvm) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 202) 		if (vcpu->arch.xive_vcpu && nr == vcpu->arch.xive_vcpu->server_num)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 203) 			return vcpu;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 204) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 205) 	return NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 206) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 207) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 208) static inline struct kvmppc_xive_src_block *kvmppc_xive_find_source(struct kvmppc_xive *xive,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 209) 		u32 irq, u16 *source)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 210) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 211) 	u32 bid = irq >> KVMPPC_XICS_ICS_SHIFT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 212) 	u16 src = irq & KVMPPC_XICS_SRC_MASK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 213) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 214) 	if (source)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 215) 		*source = src;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 216) 	if (bid > KVMPPC_XICS_MAX_ICS_ID)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 217) 		return NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 218) 	return xive->src_blocks[bid];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 219) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 220) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 221) static inline u32 kvmppc_xive_vp(struct kvmppc_xive *xive, u32 server)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 222) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 223) 	return xive->vp_base + kvmppc_pack_vcpu_id(xive->kvm, server);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 224) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 225) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 226) static inline bool kvmppc_xive_vp_in_use(struct kvm *kvm, u32 vp_id)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 227) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 228) 	struct kvm_vcpu *vcpu = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 229) 	int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 230) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 231) 	kvm_for_each_vcpu(i, vcpu, kvm) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 232) 		if (vcpu->arch.xive_vcpu && vp_id == vcpu->arch.xive_vcpu->vp_id)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 233) 			return true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 234) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 235) 	return false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 236) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 237) 
/*
 * Mapping between guest priorities and host priorities
 * is as follows.
 *
 * Guest requests for priorities 0...6 are honored, as is the
 * special "masked" value 0xff. Anything else is clamped to 6.
 *
 * Similar mapping is done for CPPR values
 */
static inline u8 xive_prio_from_guest(u8 prio)
{
	if (prio != 0xff && prio >= 6)
		return 6;
	return prio;
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 253) 
/* Host priorities are presented to the guest unchanged (identity map). */
static inline u8 xive_prio_to_guest(u8 prio)
{
	return prio;
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 258) 
/*
 * Read one entry from an event queue page.
 *
 * @qpage:  queue page of big-endian 32-bit entries, may be NULL
 * @msk:    index mask (queue size in entries minus one)
 * @idx:    in/out: current read index, advanced on a successful read
 * @toggle: in/out: generation bit, flipped each time the index wraps
 *
 * Returns the entry payload (top bit cleared), or 0 if there is no
 * queue page or the queue is empty.
 */
static inline u32 __xive_read_eq(__be32 *qpage, u32 msk, u32 *idx, u32 *toggle)
{
	u32 cur;

	if (!qpage)
		return 0;
	cur = be32_to_cpup(qpage + *idx);
	/*
	 * An entry whose valid bit (bit 31) equals the current toggle
	 * value belongs to the previous generation: queue is empty.
	 */
	if ((cur >> 31) == *toggle)
		return 0;
	/* Consume the entry; on wrap-around, flip the expected toggle */
	*idx = (*idx + 1) & msk;
	if (*idx == 0)
		(*toggle) ^= 1;
	return cur & 0x7fffffff;
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 273) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 274) extern unsigned long xive_rm_h_xirr(struct kvm_vcpu *vcpu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 275) extern unsigned long xive_rm_h_ipoll(struct kvm_vcpu *vcpu, unsigned long server);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 276) extern int xive_rm_h_ipi(struct kvm_vcpu *vcpu, unsigned long server,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 277) 			 unsigned long mfrr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 278) extern int xive_rm_h_cppr(struct kvm_vcpu *vcpu, unsigned long cppr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 279) extern int xive_rm_h_eoi(struct kvm_vcpu *vcpu, unsigned long xirr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 280) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 281) extern unsigned long (*__xive_vm_h_xirr)(struct kvm_vcpu *vcpu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 282) extern unsigned long (*__xive_vm_h_ipoll)(struct kvm_vcpu *vcpu, unsigned long server);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 283) extern int (*__xive_vm_h_ipi)(struct kvm_vcpu *vcpu, unsigned long server,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 284) 			      unsigned long mfrr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 285) extern int (*__xive_vm_h_cppr)(struct kvm_vcpu *vcpu, unsigned long cppr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 286) extern int (*__xive_vm_h_eoi)(struct kvm_vcpu *vcpu, unsigned long xirr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 287) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 288) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 289)  * Common Xive routines for XICS-over-XIVE and XIVE native
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 290)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 291) void kvmppc_xive_disable_vcpu_interrupts(struct kvm_vcpu *vcpu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 292) int kvmppc_xive_debug_show_queues(struct seq_file *m, struct kvm_vcpu *vcpu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 293) struct kvmppc_xive_src_block *kvmppc_xive_create_src_block(
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 294) 	struct kvmppc_xive *xive, int irq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 295) void kvmppc_xive_free_sources(struct kvmppc_xive_src_block *sb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 296) int kvmppc_xive_select_target(struct kvm *kvm, u32 *server, u8 prio);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 297) int kvmppc_xive_attach_escalation(struct kvm_vcpu *vcpu, u8 prio,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 298) 				  bool single_escalation);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 299) struct kvmppc_xive *kvmppc_xive_get_device(struct kvm *kvm, u32 type);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 300) void xive_cleanup_single_escalation(struct kvm_vcpu *vcpu,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 301) 				    struct kvmppc_xive_vcpu *xc, int irq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 302) int kvmppc_xive_compute_vp_id(struct kvmppc_xive *xive, u32 cpu, u32 *vp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 303) int kvmppc_xive_set_nr_servers(struct kvmppc_xive *xive, u64 addr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 304) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 305) #endif /* CONFIG_KVM_XICS */
#endif /* _KVM_PPC_BOOK3S_XIVE_H */