Orange Pi5 kernel

Deprecated Linux kernel 5.10.110 for OrangePi 5/5B/5+ boards

arch/powerpc/kvm/book3s_xics.c (git blame: all lines below are from commit 8f3ce5b39, author kx, 2023-10-28 12:00:06 +0300)

// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright 2012 Michael Ellerman, IBM Corporation.
 * Copyright 2012 Benjamin Herrenschmidt, IBM Corporation.
 */

#include <linux/kernel.h>
#include <linux/kvm_host.h>
#include <linux/err.h>
#include <linux/gfp.h>
#include <linux/anon_inodes.h>
#include <linux/spinlock.h>

#include <linux/uaccess.h>
#include <asm/kvm_book3s.h>
#include <asm/kvm_ppc.h>
#include <asm/hvcall.h>
#include <asm/xics.h>
#include <asm/debugfs.h>
#include <asm/time.h>

#include <linux/seq_file.h>

#include "book3s_xics.h"

#if 1
#define XICS_DBG(fmt...) do { } while (0)
#else
#define XICS_DBG(fmt...) trace_printk(fmt)
#endif
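
/*
 * Note: flipping the "#if 1" above to "#if 0" compiles XICS_DBG as
 * trace_printk() instead of a no-op, enabling the debug statements
 * scattered through this file.
 */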

#define ENABLE_REALMODE	true
#define DEBUG_REALMODE	false

/*
 * LOCKING
 * =======
 *
 * Each ICS has a spin lock protecting the information about the IRQ
 * sources and avoiding simultaneous deliveries of the same interrupt.
 *
 * ICP operations are done via a single compare & swap transaction
 * (most ICP state fits in the union kvmppc_icp_state)
 */

/*
 * TODO
 * ====
 *
 * - To speed up resends, keep a bitmap of "resend" set bits in the
 *   ICS
 *
 * - Speed up server# -> ICP lookup (array? hash table?)
 *
 * - Make ICS lockless as well, or at least a per-interrupt lock or hashed
 *   locks array to improve scalability
 */
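
/*
 * A sketch of the lockless ICP update pattern used throughout this file
 * (the shape of the transaction, not additional code):
 *
 *	do {
 *		old_state = new_state = READ_ONCE(icp->state);
 *		... compute new_state from old_state ...
 *	} while (!icp_try_update(icp, old_state, new_state, change_self));
 *
 * icp_try_update() recomputes the interrupt output (out_ee) and swaps
 * the whole union kvmppc_icp_state with a single cmpxchg64(), so the
 * loop body is simply retried whenever another CPU raced the update.
 */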

/* -- ICS routines -- */

static void icp_deliver_irq(struct kvmppc_xics *xics, struct kvmppc_icp *icp,
			    u32 new_irq, bool check_resend);

/*
 * Return value ideally indicates how the interrupt was handled, but no
 * callers look at it (given that we don't implement KVM_IRQ_LINE_STATUS),
 * so just return 0.
 */
static int ics_deliver_irq(struct kvmppc_xics *xics, u32 irq, u32 level)
{
	struct ics_irq_state *state;
	struct kvmppc_ics *ics;
	u16 src;
	u32 pq_old, pq_new;

	XICS_DBG("ics deliver %#x (level: %d)\n", irq, level);

	ics = kvmppc_xics_find_ics(xics, irq, &src);
	if (!ics) {
		XICS_DBG("ics_deliver_irq: IRQ 0x%06x not found !\n", irq);
		return -EINVAL;
	}
	state = &ics->irq_state[src];
	if (!state->exists)
		return -EINVAL;

	if (level == KVM_INTERRUPT_SET_LEVEL || level == KVM_INTERRUPT_SET)
		level = 1;
	else if (level == KVM_INTERRUPT_UNSET)
		level = 0;
	/*
	 * Treat other values the same as 1, consistent with the original
	 * code. Maybe WARN here?
	 */

	if (!state->lsi && level == 0) /* noop for MSI */
		return 0;

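	/*
	 * Worked example of the PQ update below, assuming the encoding in
	 * book3s_xics.h where PQ_PRESENTED is bit 0 and PQ_QUEUED is bit 1:
	 * for MSIs, ((pq_old << 1) & 3) | PQ_PRESENTED maps 00 -> P,
	 * P -> P|Q and P|Q -> P|Q, so at most one further interrupt is
	 * remembered in Q while one is in flight.
	 */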
	do {
		pq_old = state->pq_state;
		if (state->lsi) {
			if (level) {
				if (pq_old & PQ_PRESENTED)
					/* Setting an already-set LSI ... */
					return 0;

				pq_new = PQ_PRESENTED;
			} else
				pq_new = 0;
		} else
			pq_new = ((pq_old << 1) & 3) | PQ_PRESENTED;
	} while (cmpxchg(&state->pq_state, pq_old, pq_new) != pq_old);

	/* Test P=1, Q=0: this is the only case where we present */
	if (pq_new == PQ_PRESENTED)
		icp_deliver_irq(xics, NULL, irq, false);

	/* Record which CPU this arrived on for passed-through interrupts */
	if (state->host_irq)
		state->intr_cpu = raw_smp_processor_id();

	return 0;
}

static void ics_check_resend(struct kvmppc_xics *xics, struct kvmppc_ics *ics,
			     struct kvmppc_icp *icp)
{
	int i;

	for (i = 0; i < KVMPPC_XICS_IRQ_PER_ICS; i++) {
		struct ics_irq_state *state = &ics->irq_state[i];

		if (state->resend) {
			XICS_DBG("resend %#x prio %#x\n", state->number,
				 state->priority);
			icp_deliver_irq(xics, icp, state->number, true);
		}
	}
}

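/*
 * Update server/priority/saved_priority for one source under the ICS
 * lock. Returns true when a masked-pending or resend-flagged interrupt
 * has become deliverable at the new priority, in which case the caller
 * follows up with icp_deliver_irq() (as kvmppc_xics_set_xive() and
 * kvmppc_xics_int_on() below do).
 */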
static bool write_xive(struct kvmppc_xics *xics, struct kvmppc_ics *ics,
		       struct ics_irq_state *state,
		       u32 server, u32 priority, u32 saved_priority)
{
	bool deliver;
	unsigned long flags;

	local_irq_save(flags);
	arch_spin_lock(&ics->lock);

	state->server = server;
	state->priority = priority;
	state->saved_priority = saved_priority;
	deliver = false;
	if ((state->masked_pending || state->resend) && priority != MASKED) {
		state->masked_pending = 0;
		state->resend = 0;
		deliver = true;
	}

	arch_spin_unlock(&ics->lock);
	local_irq_restore(flags);

	return deliver;
}

int kvmppc_xics_set_xive(struct kvm *kvm, u32 irq, u32 server, u32 priority)
{
	struct kvmppc_xics *xics = kvm->arch.xics;
	struct kvmppc_icp *icp;
	struct kvmppc_ics *ics;
	struct ics_irq_state *state;
	u16 src;

	if (!xics)
		return -ENODEV;

	ics = kvmppc_xics_find_ics(xics, irq, &src);
	if (!ics)
		return -EINVAL;
	state = &ics->irq_state[src];

	icp = kvmppc_xics_find_server(kvm, server);
	if (!icp)
		return -EINVAL;

	XICS_DBG("set_xive %#x server %#x prio %#x MP:%d RS:%d\n",
		 irq, server, priority,
		 state->masked_pending, state->resend);

	if (write_xive(xics, ics, state, server, priority, priority))
		icp_deliver_irq(xics, icp, irq, false);

	return 0;
}

int kvmppc_xics_get_xive(struct kvm *kvm, u32 irq, u32 *server, u32 *priority)
{
	struct kvmppc_xics *xics = kvm->arch.xics;
	struct kvmppc_ics *ics;
	struct ics_irq_state *state;
	u16 src;
	unsigned long flags;

	if (!xics)
		return -ENODEV;

	ics = kvmppc_xics_find_ics(xics, irq, &src);
	if (!ics)
		return -EINVAL;
	state = &ics->irq_state[src];

	local_irq_save(flags);
	arch_spin_lock(&ics->lock);
	*server = state->server;
	*priority = state->priority;
	arch_spin_unlock(&ics->lock);
	local_irq_restore(flags);

	return 0;
}

int kvmppc_xics_int_on(struct kvm *kvm, u32 irq)
{
	struct kvmppc_xics *xics = kvm->arch.xics;
	struct kvmppc_icp *icp;
	struct kvmppc_ics *ics;
	struct ics_irq_state *state;
	u16 src;

	if (!xics)
		return -ENODEV;

	ics = kvmppc_xics_find_ics(xics, irq, &src);
	if (!ics)
		return -EINVAL;
	state = &ics->irq_state[src];

	icp = kvmppc_xics_find_server(kvm, state->server);
	if (!icp)
		return -EINVAL;

	if (write_xive(xics, ics, state, state->server, state->saved_priority,
		       state->saved_priority))
		icp_deliver_irq(xics, icp, irq, false);

	return 0;
}

int kvmppc_xics_int_off(struct kvm *kvm, u32 irq)
{
	struct kvmppc_xics *xics = kvm->arch.xics;
	struct kvmppc_ics *ics;
	struct ics_irq_state *state;
	u16 src;

	if (!xics)
		return -ENODEV;

	ics = kvmppc_xics_find_ics(xics, irq, &src);
	if (!ics)
		return -EINVAL;
	state = &ics->irq_state[src];

	write_xive(xics, ics, state, state->server, MASKED, state->priority);

	return 0;
}

/* -- ICP routines, including hcalls -- */

static inline bool icp_try_update(struct kvmppc_icp *icp,
				  union kvmppc_icp_state old,
				  union kvmppc_icp_state new,
				  bool change_self)
{
	bool success;

	/* Calculate new output value */
	new.out_ee = (new.xisr && (new.pending_pri < new.cppr));

	/* Attempt atomic update */
	success = cmpxchg64(&icp->state.raw, old.raw, new.raw) == old.raw;
	if (!success)
		goto bail;

	XICS_DBG("UPD [%04lx] - C:%02x M:%02x PP: %02x PI:%06x R:%d O:%d\n",
		 icp->server_num,
		 old.cppr, old.mfrr, old.pending_pri, old.xisr,
		 old.need_resend, old.out_ee);
	XICS_DBG("UPD        - C:%02x M:%02x PP: %02x PI:%06x R:%d O:%d\n",
		 new.cppr, new.mfrr, new.pending_pri, new.xisr,
		 new.need_resend, new.out_ee);
	/*
	 * Check for output state update
	 *
	 * Note that this is racy since another processor could be updating
	 * the state already. This is why we never clear the interrupt output
	 * here, we only ever set it. The clear only happens prior to doing
	 * an update and only by the processor itself. Currently we do it
	 * in Accept (H_XIRR) and Up_Cppr (H_CPPR).
	 *
	 * We also do not try to figure out whether the EE state has changed,
	 * we unconditionally set it if the new state calls for it. The reason
	 * for that is that we opportunistically remove the pending interrupt
	 * flag when raising CPPR, so we need to set it back here if an
	 * interrupt is still pending.
	 */
	if (new.out_ee) {
		kvmppc_book3s_queue_irqprio(icp->vcpu,
					    BOOK3S_INTERRUPT_EXTERNAL);
		if (!change_self)
			kvmppc_fast_vcpu_kick(icp->vcpu);
	}
 bail:
	return success;
}

static void icp_check_resend(struct kvmppc_xics *xics,
			     struct kvmppc_icp *icp)
{
	u32 icsid;

	/* Order this load with the test for need_resend in the caller */
	smp_rmb();
	for_each_set_bit(icsid, icp->resend_map, xics->max_icsid + 1) {
		struct kvmppc_ics *ics = xics->ics[icsid];

		if (!test_and_clear_bit(icsid, icp->resend_map))
			continue;
		if (!ics)
			continue;
		ics_check_resend(xics, ics, icp);
	}
}

static bool icp_try_to_deliver(struct kvmppc_icp *icp, u32 irq, u8 priority,
			       u32 *reject)
{
	union kvmppc_icp_state old_state, new_state;
	bool success;

	XICS_DBG("try deliver %#x(P:%#x) to server %#lx\n", irq, priority,
		 icp->server_num);

	do {
		old_state = new_state = READ_ONCE(icp->state);

		*reject = 0;

		/* See if we can deliver */
		success = new_state.cppr > priority &&
			new_state.mfrr > priority &&
			new_state.pending_pri > priority;

		/*
		 * If we can, check for a rejection and perform the
		 * delivery
		 */
		if (success) {
			*reject = new_state.xisr;
			new_state.xisr = irq;
			new_state.pending_pri = priority;
		} else {
			/*
			 * If we failed to deliver we set need_resend
			 * so a subsequent CPPR state change causes us
			 * to try a new delivery.
			 */
			new_state.need_resend = true;
		}

	} while (!icp_try_update(icp, old_state, new_state, false));

	return success;
}

static void icp_deliver_irq(struct kvmppc_xics *xics, struct kvmppc_icp *icp,
			    u32 new_irq, bool check_resend)
{
	struct ics_irq_state *state;
	struct kvmppc_ics *ics;
	u32 reject;
	u16 src;
	unsigned long flags;

	/*
	 * This is used both for initial delivery of an interrupt and
	 * for subsequent rejection.
	 *
	 * Rejection can be racy vs. resends. We have evaluated the
	 * rejection in an atomic ICP transaction which is now complete,
	 * so potentially the ICP can already accept the interrupt again.
	 *
	 * So we need to retry the delivery. Essentially the reject path
	 * boils down to a failed delivery. Always.
	 *
	 * Now the interrupt could also have moved to a different target,
	 * thus we may need to re-do the ICP lookup as well
	 */

 again:
	/* Get the ICS state and lock it */
	ics = kvmppc_xics_find_ics(xics, new_irq, &src);
	if (!ics) {
		XICS_DBG("icp_deliver_irq: IRQ 0x%06x not found !\n", new_irq);
		return;
	}
	state = &ics->irq_state[src];

	/* Get a lock on the ICS */
	local_irq_save(flags);
	arch_spin_lock(&ics->lock);

	/* Get our server */
	if (!icp || state->server != icp->server_num) {
		icp = kvmppc_xics_find_server(xics->kvm, state->server);
		if (!icp) {
			pr_warn("icp_deliver_irq: IRQ 0x%06x server 0x%x not found !\n",
				new_irq, state->server);
			goto out;
		}
	}

	if (check_resend)
		if (!state->resend)
			goto out;

	/* Clear the resend bit of that interrupt */
	state->resend = 0;

	/*
	 * If masked, bail out
	 *
	 * Note: PAPR doesn't mention anything about masked pending
	 * when doing a resend, only when doing a delivery.
	 *
	 * However that would have the effect of losing a masked
	 * interrupt that was rejected and isn't consistent with
	 * the whole masked_pending business which is about not
	 * losing interrupts that occur while masked.
	 *
	 * I don't differentiate normal deliveries and resends, this
	 * implementation will differ from PAPR and not lose such
	 * interrupts.
	 */
	if (state->priority == MASKED) {
		XICS_DBG("irq %#x masked pending\n", new_irq);
		state->masked_pending = 1;
		goto out;
	}

	/*
	 * Try the delivery, this will set the need_resend flag
	 * in the ICP as part of the atomic transaction if the
	 * delivery is not possible.
	 *
	 * Note that if successful, the new delivery might have itself
	 * rejected an interrupt that was "delivered" before we took the
	 * ics spin lock.
	 *
	 * In this case we do the whole sequence all over again for the
	 * new guy. We cannot assume that the rejected interrupt is less
	 * favored than the new one, and thus doesn't need to be delivered,
	 * because by the time we exit icp_try_to_deliver() the target
	 * processor may well have already consumed & completed it, and thus
	 * the rejected interrupt might actually be already acceptable.
	 */
	if (icp_try_to_deliver(icp, new_irq, state->priority, &reject)) {
		/*
		 * Delivery was successful, did we reject somebody else ?
		 */
		if (reject && reject != XICS_IPI) {
			arch_spin_unlock(&ics->lock);
			local_irq_restore(flags);
			new_irq = reject;
			check_resend = 0;
			goto again;
		}
	} else {
		/*
		 * We failed to deliver the interrupt, so we need to set
		 * the resend map bit and mark the ICS state as needing a
		 * resend.
		 */
		state->resend = 1;

		/*
		 * Make sure when checking resend, we don't miss the resend
		 * if resend_map bit is seen and cleared.
		 */
		smp_wmb();
		set_bit(ics->icsid, icp->resend_map);

		/*
		 * If the need_resend flag got cleared in the ICP some time
		 * between the icp_try_to_deliver() atomic update and now,
		 * then we know it might have missed the resend_map bit. So
		 * we retry.
		 */
		smp_mb();
		if (!icp->state.need_resend) {
			state->resend = 0;
			arch_spin_unlock(&ics->lock);
			local_irq_restore(flags);
			check_resend = 0;
			goto again;
		}
	}
 out:
	arch_spin_unlock(&ics->lock);
	local_irq_restore(flags);
}

static void icp_down_cppr(struct kvmppc_xics *xics, struct kvmppc_icp *icp,
			  u8 new_cppr)
{
	union kvmppc_icp_state old_state, new_state;
	bool resend;

	/*
	 * This handles several related states in one operation:
	 *
	 * ICP State: Down_CPPR
	 *
	 * Load CPPR with new value and if the XISR is 0
	 * then check for resends:
	 *
	 * ICP State: Resend
	 *
	 * If MFRR is more favored than CPPR, check for IPIs
	 * and notify ICS of a potential resend. This is done
	 * asynchronously (when used in real mode, we will have
	 * to exit here).
	 *
	 * We do not handle the complete Check_IPI as documented
	 * here. In the PAPR, this state will be used for both
	 * Set_MFRR and Down_CPPR. However, we know that we aren't
	 * changing the MFRR state here so we don't need to handle
	 * the case of an MFRR causing a reject of a pending irq,
	 * this will have been handled when the MFRR was set in the
	 * first place.
	 *
	 * Thus we don't have to handle rejects, only resends.
	 *
	 * When implementing real mode for HV KVM, resend will lead to
	 * a H_TOO_HARD return and the whole transaction will be handled
	 * in virtual mode.
	 */
	do {
		old_state = new_state = READ_ONCE(icp->state);

		/* Down_CPPR */
		new_state.cppr = new_cppr;

		/*
		 * Cut down Resend / Check_IPI / IPI
		 *
		 * The logic is that we cannot have a pending interrupt
		 * trumped by an IPI at this point (see above), so we
		 * know that either the pending interrupt is already an
		 * IPI (in which case we don't care to override it) or
		 * it's either more favored than us or non-existent
		 */
		if (new_state.mfrr < new_cppr &&
		    new_state.mfrr <= new_state.pending_pri) {
			WARN_ON(new_state.xisr != XICS_IPI &&
				new_state.xisr != 0);
			new_state.pending_pri = new_state.mfrr;
			new_state.xisr = XICS_IPI;
		}

		/* Latch/clear resend bit */
		resend = new_state.need_resend;
		new_state.need_resend = 0;

	} while (!icp_try_update(icp, old_state, new_state, true));

	/*
	 * Now handle resend checks. Those are asynchronous to the ICP
	 * state update in HW (ie bus transactions) so we can handle them
	 * separately here too
	 */
	if (resend)
		icp_check_resend(xics, icp);
}

static noinline unsigned long kvmppc_h_xirr(struct kvm_vcpu *vcpu)
{
	union kvmppc_icp_state old_state, new_state;
	struct kvmppc_icp *icp = vcpu->arch.icp;
	u32 xirr;

	/* First, remove EE from the processor */
	kvmppc_book3s_dequeue_irqprio(icp->vcpu, BOOK3S_INTERRUPT_EXTERNAL);

	/*
	 * ICP State: Accept_Interrupt
	 *
	 * Return the pending interrupt (if any) along with the
	 * current CPPR, then clear the XISR & set CPPR to the
	 * pending priority
	 */
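	/*
	 * Worked example of the XIRR packing below: the CPPR occupies the
	 * top byte and the XISR the low 24 bits, so cppr 0x05 with xisr
	 * 0x000010 yields xirr 0x05000010.
	 */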
	do {
		old_state = new_state = READ_ONCE(icp->state);

		xirr = old_state.xisr | (((u32)old_state.cppr) << 24);
		if (!old_state.xisr)
			break;
		new_state.cppr = new_state.pending_pri;
		new_state.pending_pri = 0xff;
		new_state.xisr = 0;

	} while (!icp_try_update(icp, old_state, new_state, true));

	XICS_DBG("h_xirr vcpu %d xirr %#x\n", vcpu->vcpu_id, xirr);

	return xirr;
}

static noinline int kvmppc_h_ipi(struct kvm_vcpu *vcpu, unsigned long server,
				 unsigned long mfrr)
{
	union kvmppc_icp_state old_state, new_state;
	struct kvmppc_xics *xics = vcpu->kvm->arch.xics;
	struct kvmppc_icp *icp;
	u32 reject;
	bool resend;
	bool local;

	XICS_DBG("h_ipi vcpu %d to server %lu mfrr %#lx\n",
		 vcpu->vcpu_id, server, mfrr);

	icp = vcpu->arch.icp;
	local = icp->server_num == server;
	if (!local) {
		icp = kvmppc_xics_find_server(vcpu->kvm, server);
		if (!icp)
			return H_PARAMETER;
	}

	/*
	 * ICP state: Set_MFRR
	 *
	 * If the CPPR is more favored than the new MFRR, then
	 * nothing needs to be rejected as there can be no XISR to
	 * reject.  If the MFRR is being made less favored then
	 * there might be a previously-rejected interrupt needing
	 * to be resent.
	 *
	 * ICP state: Check_IPI
	 *
	 * If the CPPR is less favored, then we might be replacing
	 * an interrupt, and thus need to possibly reject it.
	 *
	 * ICP State: IPI
	 *
	 * Besides rejecting any pending interrupts, we also
	 * update XISR and pending_pri to mark IPI as pending.
	 *
	 * PAPR does not describe this state, but if the MFRR is being
	 * made less favored than its earlier value, there might be
	 * a previously-rejected interrupt needing to be resent.
	 * Ideally, we would want to resend only if
	 *	prio(pending_interrupt) < mfrr &&
	 *	prio(pending_interrupt) < cppr
	 * where pending interrupt is the one that was rejected. But
	 * we don't have that state, so we simply trigger a resend
	 * whenever the MFRR is made less favored.
	 */
	do {
		old_state = new_state = READ_ONCE(icp->state);

		/* Set_MFRR */
		new_state.mfrr = mfrr;

		/* Check_IPI */
		reject = 0;
		resend = false;
		if (mfrr < new_state.cppr) {
			/* Reject a pending interrupt if not an IPI */
			if (mfrr <= new_state.pending_pri) {
				reject = new_state.xisr;
				new_state.pending_pri = mfrr;
				new_state.xisr = XICS_IPI;
			}
		}

		if (mfrr > old_state.mfrr) {
			resend = new_state.need_resend;
			new_state.need_resend = 0;
		}
	} while (!icp_try_update(icp, old_state, new_state, local));

	/* Handle reject */
	if (reject && reject != XICS_IPI)
		icp_deliver_irq(xics, icp, reject, false);

	/* Handle resend */
	if (resend)
		icp_check_resend(xics, icp);

	return H_SUCCESS;
}

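/*
 * H_IPOLL: report the would-be XIRR (same CPPR/XISR packing as in
 * kvmppc_h_xirr() above) in GPR4 and the MFRR in GPR5 without accepting
 * anything; the ICP state is only read here, never updated.
 */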
static int kvmppc_h_ipoll(struct kvm_vcpu *vcpu, unsigned long server)
{
	union kvmppc_icp_state state;
	struct kvmppc_icp *icp;

	icp = vcpu->arch.icp;
	if (icp->server_num != server) {
		icp = kvmppc_xics_find_server(vcpu->kvm, server);
		if (!icp)
			return H_PARAMETER;
	}
	state = READ_ONCE(icp->state);
	kvmppc_set_gpr(vcpu, 4, ((u32)state.cppr << 24) | state.xisr);
	kvmppc_set_gpr(vcpu, 5, state.mfrr);
	return H_SUCCESS;
}

static noinline void kvmppc_h_cppr(struct kvm_vcpu *vcpu, unsigned long cppr)
{
	union kvmppc_icp_state old_state, new_state;
	struct kvmppc_xics *xics = vcpu->kvm->arch.xics;
	struct kvmppc_icp *icp = vcpu->arch.icp;
	u32 reject;

	XICS_DBG("h_cppr vcpu %d cppr %#lx\n", vcpu->vcpu_id, cppr);

	/*
	 * ICP State: Set_CPPR
	 *
	 * We can safely compare the new value with the current
	 * value outside of the transaction as the CPPR is only
	 * ever changed by the processor on itself
	 */
	if (cppr > icp->state.cppr)
		icp_down_cppr(xics, icp, cppr);
	else if (cppr == icp->state.cppr)
		return;

	/*
	 * ICP State: Up_CPPR
	 *
	 * The processor is raising its priority, this can result
	 * in a rejection of a pending interrupt:
	 *
	 * ICP State: Reject_Current
	 *
	 * We can remove EE from the current processor, the update
	 * transaction will set it again if needed
	 */
	kvmppc_book3s_dequeue_irqprio(icp->vcpu, BOOK3S_INTERRUPT_EXTERNAL);

	do {
		old_state = new_state = READ_ONCE(icp->state);

		reject = 0;
		new_state.cppr = cppr;

		if (cppr <= new_state.pending_pri) {
			reject = new_state.xisr;
			new_state.xisr = 0;
			new_state.pending_pri = 0xff;
		}

	} while (!icp_try_update(icp, old_state, new_state, true));

	/*
	 * Check for rejects. They are handled by doing a new delivery
	 * attempt (see comments in icp_deliver_irq).
	 */
	if (reject && reject != XICS_IPI)
		icp_deliver_irq(xics, icp, reject, false);
}

static int ics_eoi(struct kvm_vcpu *vcpu, u32 irq)
{
	struct kvmppc_xics *xics = vcpu->kvm->arch.xics;
	struct kvmppc_icp *icp = vcpu->arch.icp;
	struct kvmppc_ics *ics;
	struct ics_irq_state *state;
	u16 src;
	u32 pq_old, pq_new;

	/*
	 * ICS EOI handling: For LSI, if P bit is still set, we need to
	 * resend it.
	 *
	 * For MSI, we move Q bit into P (and clear Q). If it is set,
	 * resend it.
	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  793) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  794) 	ics = kvmppc_xics_find_ics(xics, irq, &src);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  795) 	if (!ics) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  796) 		XICS_DBG("ics_eoi: IRQ 0x%06x not found!\n", irq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  797) 		return H_PARAMETER;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  798) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  799) 	state = &ics->irq_state[src];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  800) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  801) 	if (state->lsi)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  802) 		pq_new = state->pq_state;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  803) 	else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  804) 		do {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  805) 			pq_old = state->pq_state;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  806) 			pq_new = pq_old >> 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  807) 		} while (cmpxchg(&state->pq_state, pq_old, pq_new) != pq_old);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  808) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  809) 	if (pq_new & PQ_PRESENTED)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  810) 		icp_deliver_irq(xics, icp, irq, false);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  811) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  812) 	kvm_notify_acked_irq(vcpu->kvm, 0, irq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  813) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  814) 	return H_SUCCESS;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  815) }
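
/*
 * Sketch of the MSI P/Q transition performed in ics_eoi() above,
 * assuming P is bit 0 (PQ_PRESENTED) and Q is bit 1 (PQ_QUEUED),
 * which is what the "pq_new = pq_old >> 1" shift relies on:
 *
 *	P=1 Q=0 (0b01) --EOI--> P=0 Q=0 (0b00): nothing to resend
 *	P=1 Q=1 (0b11) --EOI--> P=1 Q=0 (0b01): the queued interrupt
 *	    is presented again via icp_deliver_irq()
 *
 * For an LSI the P/Q state is left untouched: if the line is still
 * asserted (P set), the same interrupt is simply delivered again.
 */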
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  816) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  817) static noinline int kvmppc_h_eoi(struct kvm_vcpu *vcpu, unsigned long xirr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  818) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  819) 	struct kvmppc_xics *xics = vcpu->kvm->arch.xics;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  820) 	struct kvmppc_icp *icp = vcpu->arch.icp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  821) 	u32 irq = xirr & 0x00ffffff;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  822) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  823) 	XICS_DBG("h_eoi vcpu %d eoi %#lx\n", vcpu->vcpu_id, xirr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  824) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  825) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  826) 	 * ICP State: EOI
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  827) 	 *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  828) 	 * Note: If EOI is incorrectly used by SW to lower the CPPR
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  829) 	 * value (i.e. make it more favored), we do not check for
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  830) 	 * rejection of a pending interrupt; this is a SW error and
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  831) 	 * PAPR specifies that we don't have to deal with it.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  832) 	 *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  833) 	 * The sending of an EOI to the ICS is handled after the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  834) 	 * CPPR update
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  835) 	 *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  836) 	 * ICP State: Down_CPPR, which we handle
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  837) 	 * in a separate function as it is shared with H_CPPR.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  838) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  839) 	icp_down_cppr(xics, icp, xirr >> 24);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  840) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  841) 	/* IPIs have no EOI */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  842) 	if (irq == XICS_IPI)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  843) 		return H_SUCCESS;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  844) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  845) 	return ics_eoi(vcpu, irq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  846) }
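
/*
 * Layout of the XIRR value consumed above, per the shift and mask
 * used in kvmppc_h_eoi(): the top byte is the CPPR to restore on EOI
 * and the low 24 bits are the interrupt source number. For example:
 *
 *	xirr = 0x05000010  ->  cppr = xirr >> 24       = 0x05
 *	                       irq  = xirr & 0xffffff  = 0x000010
 *
 * An irq equal to XICS_IPI means the EOI was for an IPI, which needs
 * no ICS-side processing.
 */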
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  847) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  848) int kvmppc_xics_rm_complete(struct kvm_vcpu *vcpu, u32 hcall)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  849) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  850) 	struct kvmppc_xics *xics = vcpu->kvm->arch.xics;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  851) 	struct kvmppc_icp *icp = vcpu->arch.icp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  852) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  853) 	XICS_DBG("XICS_RM: H_%x completing, act: %x state: %lx tgt: %p\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  854) 		 hcall, icp->rm_action, icp->rm_dbgstate.raw, icp->rm_dbgtgt);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  855) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  856) 	if (icp->rm_action & XICS_RM_KICK_VCPU) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  857) 		icp->n_rm_kick_vcpu++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  858) 		kvmppc_fast_vcpu_kick(icp->rm_kick_target);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  859) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  860) 	if (icp->rm_action & XICS_RM_CHECK_RESEND) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  861) 		icp->n_rm_check_resend++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  862) 		icp_check_resend(xics, icp->rm_resend_icp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  863) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  864) 	if (icp->rm_action & XICS_RM_NOTIFY_EOI) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  865) 		icp->n_rm_notify_eoi++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  866) 		kvm_notify_acked_irq(vcpu->kvm, 0, icp->rm_eoied_irq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  867) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  868) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  869) 	icp->rm_action = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  870) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  871) 	return H_SUCCESS;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  872) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  873) EXPORT_SYMBOL_GPL(kvmppc_xics_rm_complete);
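
/*
 * Illustrative flow for the rm_action flags handled above (a sketch,
 * not code from this file): the real-mode hcall handlers cannot
 * safely perform some operations (e.g. waking another vcpu), so they
 * record the deferred work in icp->rm_action and force an exit to
 * virtual mode, where kvmppc_xics_rm_complete() finishes the job:
 *
 *	real mode:     icp->rm_action |= XICS_RM_KICK_VCPU;
 *	               icp->rm_kick_target = vcpu;
 *	virtual mode:  kvmppc_xics_rm_complete()
 *	                 -> kvmppc_fast_vcpu_kick(icp->rm_kick_target)
 */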
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  874) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  875) int kvmppc_xics_hcall(struct kvm_vcpu *vcpu, u32 req)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  876) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  877) 	struct kvmppc_xics *xics = vcpu->kvm->arch.xics;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  878) 	unsigned long res;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  879) 	int rc = H_SUCCESS;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  880) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  881) 	/* Check if we have an ICP */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  882) 	if (!xics || !vcpu->arch.icp)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  883) 		return H_HARDWARE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  884) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  885) 	/* These requests don't have real-mode implementations at present */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  886) 	switch (req) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  887) 	case H_XIRR_X:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  888) 		res = kvmppc_h_xirr(vcpu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  889) 		kvmppc_set_gpr(vcpu, 4, res);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  890) 		kvmppc_set_gpr(vcpu, 5, get_tb());
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  891) 		return rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  892) 	case H_IPOLL:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  893) 		rc = kvmppc_h_ipoll(vcpu, kvmppc_get_gpr(vcpu, 4));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  894) 		return rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  895) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  896) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  897) 	/* Real mode returned "too hard"; complete the hcall in virtual mode */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  898) 	if (xics->real_mode && is_kvmppc_hv_enabled(vcpu->kvm))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  899) 		return kvmppc_xics_rm_complete(vcpu, req);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  900) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  901) 	switch (req) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  902) 	case H_XIRR:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  903) 		res = kvmppc_h_xirr(vcpu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  904) 		kvmppc_set_gpr(vcpu, 4, res);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  905) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  906) 	case H_CPPR:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  907) 		kvmppc_h_cppr(vcpu, kvmppc_get_gpr(vcpu, 4));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  908) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  909) 	case H_EOI:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  910) 		rc = kvmppc_h_eoi(vcpu, kvmppc_get_gpr(vcpu, 4));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  911) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  912) 	case H_IPI:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  913) 		rc = kvmppc_h_ipi(vcpu, kvmppc_get_gpr(vcpu, 4),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  914) 				  kvmppc_get_gpr(vcpu, 5));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  915) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  916) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  917) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  918) 	return rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  919) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  920) EXPORT_SYMBOL_GPL(kvmppc_xics_hcall);
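
/*
 * Example of the PAPR hcall register convention assumed by the
 * dispatcher above (a guest-side sketch, not code from this file):
 * the hcall number is passed in r3 and the arguments in r4/r5, so an
 * H_IPI issued by the guest amounts to
 *
 *	r3 = H_IPI, r4 = target server number, r5 = MFRR priority
 *
 * and the kvmppc_set_gpr(vcpu, 4, ...) stores above are what the
 * guest reads back in r4 after the hcall returns (e.g. the XIRR
 * value for H_XIRR).
 */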
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  921) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  922) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  923) /* -- Initialisation code etc. -- */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  924) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  925) static void xics_debugfs_irqmap(struct seq_file *m,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  926) 				struct kvmppc_passthru_irqmap *pimap)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  927) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  928) 	int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  929) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  930) 	if (!pimap)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  931) 		return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  932) 	seq_printf(m, "=========\nPIRQ mappings: %d maps\n=========\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  933) 				pimap->n_mapped);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  934) 	for (i = 0; i < pimap->n_mapped; i++)  {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  935) 		seq_printf(m, "r_hwirq=%x, v_hwirq=%x\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  936) 			pimap->mapped[i].r_hwirq, pimap->mapped[i].v_hwirq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  937) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  938) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  939) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  940) static int xics_debug_show(struct seq_file *m, void *private)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  941) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  942) 	struct kvmppc_xics *xics = m->private;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  943) 	struct kvm *kvm = xics->kvm;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  944) 	struct kvm_vcpu *vcpu;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  945) 	int icsid, i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  946) 	unsigned long flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  947) 	unsigned long t_rm_kick_vcpu, t_rm_check_resend;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  948) 	unsigned long t_rm_notify_eoi;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  949) 	unsigned long t_reject, t_check_resend;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  950) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  951) 	if (!kvm)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  952) 		return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  953) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  954) 	t_rm_kick_vcpu = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  955) 	t_rm_notify_eoi = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  956) 	t_rm_check_resend = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  957) 	t_check_resend = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  958) 	t_reject = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  959) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  960) 	xics_debugfs_irqmap(m, kvm->arch.pimap);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  961) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  962) 	seq_printf(m, "=========\nICP state\n=========\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  963) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  964) 	kvm_for_each_vcpu(i, vcpu, kvm) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  965) 		struct kvmppc_icp *icp = vcpu->arch.icp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  966) 		union kvmppc_icp_state state;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  967) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  968) 		if (!icp)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  969) 			continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  970) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  971) 		state.raw = READ_ONCE(icp->state.raw);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  972) 		seq_printf(m, "cpu server %#lx XIRR:%#x PPRI:%#x CPPR:%#x MFRR:%#x OUT:%d NR:%d\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  973) 			   icp->server_num, state.xisr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  974) 			   state.pending_pri, state.cppr, state.mfrr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  975) 			   state.out_ee, state.need_resend);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  976) 		t_rm_kick_vcpu += icp->n_rm_kick_vcpu;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  977) 		t_rm_notify_eoi += icp->n_rm_notify_eoi;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  978) 		t_rm_check_resend += icp->n_rm_check_resend;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  979) 		t_check_resend += icp->n_check_resend;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  980) 		t_reject += icp->n_reject;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  981) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  982) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  983) 	seq_printf(m, "ICP Guest->Host totals: kick_vcpu=%lu check_resend=%lu notify_eoi=%lu\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  984) 			t_rm_kick_vcpu, t_rm_check_resend,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  985) 			t_rm_notify_eoi);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  986) 	seq_printf(m, "ICP Real Mode totals: check_resend=%lu reject=%lu\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  987) 			t_check_resend, t_reject);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  988) 	for (icsid = 0; icsid <= KVMPPC_XICS_MAX_ICS_ID; icsid++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  989) 		struct kvmppc_ics *ics = xics->ics[icsid];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  990) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  991) 		if (!ics)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  992) 			continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  993) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  994) 		seq_printf(m, "=========\nICS state for ICS 0x%x\n=========\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  995) 			   icsid);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  996) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  997) 		local_irq_save(flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  998) 		arch_spin_lock(&ics->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  999) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1000) 		for (i = 0; i < KVMPPC_XICS_IRQ_PER_ICS; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1001) 			struct ics_irq_state *irq = &ics->irq_state[i];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1002) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1003) 			seq_printf(m, "irq 0x%06x: server %#x prio %#x save prio %#x pq_state %d resend %d masked pending %d\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1004) 				   irq->number, irq->server, irq->priority,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1005) 				   irq->saved_priority, irq->pq_state,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1006) 				   irq->resend, irq->masked_pending);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1007) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1008) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1009) 		arch_spin_unlock(&ics->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1010) 		local_irq_restore(flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1011) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1012) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1013) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1014) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1015) DEFINE_SHOW_ATTRIBUTE(xics_debug);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1016) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1017) static void xics_debugfs_init(struct kvmppc_xics *xics)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1018) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1019) 	char *name;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1020) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1021) 	name = kasprintf(GFP_KERNEL, "kvm-xics-%p", xics);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1022) 	if (!name) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1023) 		pr_err("%s: no memory for name\n", __func__);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1024) 		return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1025) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1026) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1027) 	xics->dentry = debugfs_create_file(name, 0444, powerpc_debugfs_root,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1028) 					   xics, &xics_debug_fops);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1029) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1030) 	pr_debug("%s: created %s\n", __func__, name);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1031) 	kfree(name);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1032) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1033) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1034) static struct kvmppc_ics *kvmppc_xics_create_ics(struct kvm *kvm,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1035) 					struct kvmppc_xics *xics, int irq)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1036) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1037) 	struct kvmppc_ics *ics;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1038) 	int i, icsid;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1039) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1040) 	icsid = irq >> KVMPPC_XICS_ICS_SHIFT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1041) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1042) 	mutex_lock(&kvm->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1043) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1044) 	/* ICS already exists - somebody else got here first */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1045) 	if (xics->ics[icsid])
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1046) 		goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1047) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1048) 	/* Create the ICS */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1049) 	ics = kzalloc(sizeof(struct kvmppc_ics), GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1050) 	if (!ics)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1051) 		goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1052) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1053) 	ics->icsid = icsid;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1054) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1055) 	for (i = 0; i < KVMPPC_XICS_IRQ_PER_ICS; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1056) 		ics->irq_state[i].number = (icsid << KVMPPC_XICS_ICS_SHIFT) | i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1057) 		ics->irq_state[i].priority = MASKED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1058) 		ics->irq_state[i].saved_priority = MASKED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1059) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1060) 	smp_wmb();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1061) 	xics->ics[icsid] = ics;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1062) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1063) 	if (icsid > xics->max_icsid)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1064) 		xics->max_icsid = icsid;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1065) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1066)  out:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1067) 	mutex_unlock(&kvm->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1068) 	return xics->ics[icsid];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1069) }
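
/*
 * Worked example of the global interrupt numbering used above: an
 * irq number splits into an ICS id (upper bits) and a source index
 * within that ICS (low KVMPPC_XICS_ICS_SHIFT bits). Assuming a shift
 * of 10 for illustration (the real value lives in book3s_xics.h):
 *
 *	irq = 0x815  ->  icsid = irq >> 10   = 0x2
 *	                 src   = irq & 0x3ff = 0x015
 *
 * so each ICS covers KVMPPC_XICS_IRQ_PER_ICS consecutive sources.
 */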
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1070) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1071) static int kvmppc_xics_create_icp(struct kvm_vcpu *vcpu, unsigned long server_num)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1072) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1073) 	struct kvmppc_icp *icp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1074) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1075) 	if (!vcpu->kvm->arch.xics)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1076) 		return -ENODEV;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1077) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1078) 	if (kvmppc_xics_find_server(vcpu->kvm, server_num))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1079) 		return -EEXIST;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1080) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1081) 	icp = kzalloc(sizeof(struct kvmppc_icp), GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1082) 	if (!icp)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1083) 		return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1084) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1085) 	icp->vcpu = vcpu;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1086) 	icp->server_num = server_num;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1087) 	icp->state.mfrr = MASKED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1088) 	icp->state.pending_pri = MASKED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1089) 	vcpu->arch.icp = icp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1090) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1091) 	XICS_DBG("created server for vcpu %d\n", vcpu->vcpu_id);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1092) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1093) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1094) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1095) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1096) u64 kvmppc_xics_get_icp(struct kvm_vcpu *vcpu)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1097) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1098) 	struct kvmppc_icp *icp = vcpu->arch.icp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1099) 	union kvmppc_icp_state state;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1100) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1101) 	if (!icp)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1102) 		return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1103) 	state = icp->state;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1104) 	return ((u64)state.cppr << KVM_REG_PPC_ICP_CPPR_SHIFT) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1105) 		((u64)state.xisr << KVM_REG_PPC_ICP_XISR_SHIFT) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1106) 		((u64)state.mfrr << KVM_REG_PPC_ICP_MFRR_SHIFT) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1107) 		((u64)state.pending_pri << KVM_REG_PPC_ICP_PPRI_SHIFT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1108) }
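
/*
 * Sketch of the one_reg encoding produced above: the four ICP fields
 * are packed into a single u64 at the KVM_REG_PPC_ICP_*_SHIFT
 * positions so userspace can save or restore the whole ICP with one
 * register access, e.g.:
 *
 *	u64 icpval = kvmppc_xics_get_icp(vcpu);
 *	u8  cppr   = icpval >> KVM_REG_PPC_ICP_CPPR_SHIFT;
 *	u32 xisr   = (icpval >> KVM_REG_PPC_ICP_XISR_SHIFT) &
 *			KVM_REG_PPC_ICP_XISR_MASK;
 *
 * kvmppc_xics_set_icp() below performs the inverse unpacking, plus
 * consistency checks.
 */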
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1109) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1110) int kvmppc_xics_set_icp(struct kvm_vcpu *vcpu, u64 icpval)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1111) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1112) 	struct kvmppc_icp *icp = vcpu->arch.icp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1113) 	struct kvmppc_xics *xics = vcpu->kvm->arch.xics;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1114) 	union kvmppc_icp_state old_state, new_state;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1115) 	struct kvmppc_ics *ics;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1116) 	u8 cppr, mfrr, pending_pri;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1117) 	u32 xisr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1118) 	u16 src;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1119) 	bool resend;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1120) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1121) 	if (!icp || !xics)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1122) 		return -ENOENT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1123) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1124) 	cppr = icpval >> KVM_REG_PPC_ICP_CPPR_SHIFT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1125) 	xisr = (icpval >> KVM_REG_PPC_ICP_XISR_SHIFT) &
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1126) 		KVM_REG_PPC_ICP_XISR_MASK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1127) 	mfrr = icpval >> KVM_REG_PPC_ICP_MFRR_SHIFT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1128) 	pending_pri = icpval >> KVM_REG_PPC_ICP_PPRI_SHIFT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1129) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1130) 	/* Require the new state to be internally consistent */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1131) 	if (xisr == 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1132) 		if (pending_pri != 0xff)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1133) 			return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1134) 	} else if (xisr == XICS_IPI) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1135) 		if (pending_pri != mfrr || pending_pri >= cppr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1136) 			return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1137) 	} else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1138) 		if (pending_pri >= mfrr || pending_pri >= cppr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1139) 			return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1140) 		ics = kvmppc_xics_find_ics(xics, xisr, &src);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1141) 		if (!ics)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1142) 			return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1143) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1144) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1145) 	new_state.raw = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1146) 	new_state.cppr = cppr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1147) 	new_state.xisr = xisr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1148) 	new_state.mfrr = mfrr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1149) 	new_state.pending_pri = pending_pri;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1150) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1151) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1152) 	 * Deassert the CPU interrupt request.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1153) 	 * icp_try_update will reassert it if necessary.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1154) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1155) 	kvmppc_book3s_dequeue_irqprio(icp->vcpu, BOOK3S_INTERRUPT_EXTERNAL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1156) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1157) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1158) 	 * Note that if we displace an interrupt from old_state.xisr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1159) 	 * we don't mark it as rejected.  We expect userspace to set
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1160) 	 * the state of the interrupt sources to be consistent with
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1161) 	 * the ICP states (either before or afterwards, which doesn't
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1162) 	 * matter).  We do handle resends due to CPPR becoming less
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1163) 	 * favoured because that is necessary to end up with a
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1164) 	 * consistent state in the situation where userspace restores
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1165) 	 * the ICS states before the ICP states.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1166) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1167) 	do {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1168) 		old_state = READ_ONCE(icp->state);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1169) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1170) 		if (new_state.mfrr <= old_state.mfrr) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1171) 			resend = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1172) 			new_state.need_resend = old_state.need_resend;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1173) 		} else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1174) 			resend = old_state.need_resend;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1175) 			new_state.need_resend = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1176) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1177) 	} while (!icp_try_update(icp, old_state, new_state, false));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1178) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1179) 	if (resend)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1180) 		icp_check_resend(xics, icp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1181) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1182) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1183) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1184) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1185) static int xics_get_source(struct kvmppc_xics *xics, long irq, u64 addr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1186) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1187) 	int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1188) 	struct kvmppc_ics *ics;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1189) 	struct ics_irq_state *irqp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1190) 	u64 __user *ubufp = (u64 __user *) addr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1191) 	u16 idx;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1192) 	u64 val, prio;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1193) 	unsigned long flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1194) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1195) 	ics = kvmppc_xics_find_ics(xics, irq, &idx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1196) 	if (!ics)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1197) 		return -ENOENT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1198) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1199) 	irqp = &ics->irq_state[idx];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1200) 	local_irq_save(flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1201) 	arch_spin_lock(&ics->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1202) 	ret = -ENOENT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1203) 	if (irqp->exists) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1204) 		val = irqp->server;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1205) 		prio = irqp->priority;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1206) 		if (prio == MASKED) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1207) 			val |= KVM_XICS_MASKED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1208) 			prio = irqp->saved_priority;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1209) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1210) 		val |= prio << KVM_XICS_PRIORITY_SHIFT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1211) 		if (irqp->lsi) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1212) 			val |= KVM_XICS_LEVEL_SENSITIVE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1213) 			if (irqp->pq_state & PQ_PRESENTED)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1214) 				val |= KVM_XICS_PENDING;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1215) 		} else if (irqp->masked_pending || irqp->resend)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1216) 			val |= KVM_XICS_PENDING;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1217) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1218) 		if (irqp->pq_state & PQ_PRESENTED)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1219) 			val |= KVM_XICS_PRESENTED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1220) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1221) 		if (irqp->pq_state & PQ_QUEUED)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1222) 			val |= KVM_XICS_QUEUED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1223) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1224) 		ret = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1225) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1226) 	arch_spin_unlock(&ics->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1227) 	local_irq_restore(flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1228) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1229) 	if (!ret && put_user(val, ubufp))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1230) 		ret = -EFAULT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1231) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1232) 	return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1233) }
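
/*
 * Shape of the u64 source state written to userspace above (and
 * parsed again by xics_set_source() below):
 *
 *	low bits    server number   (KVM_XICS_DESTINATION_MASK)
 *	high bits   priority        (<< KVM_XICS_PRIORITY_SHIFT)
 *	flag bits   KVM_XICS_MASKED, KVM_XICS_LEVEL_SENSITIVE,
 *	            KVM_XICS_PENDING, KVM_XICS_PRESENTED,
 *	            KVM_XICS_QUEUED
 *
 * Note that a masked source reports its saved_priority, so that
 * restoring the state after a migration can unmask it back to the
 * original priority.
 */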
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1234) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1235) static int xics_set_source(struct kvmppc_xics *xics, long irq, u64 addr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1236) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1237) 	struct kvmppc_ics *ics;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1238) 	struct ics_irq_state *irqp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1239) 	u64 __user *ubufp = (u64 __user *) addr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1240) 	u16 idx;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1241) 	u64 val;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1242) 	u8 prio;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1243) 	u32 server;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1244) 	unsigned long flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1245) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1246) 	if (irq < KVMPPC_XICS_FIRST_IRQ || irq >= KVMPPC_XICS_NR_IRQS)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1247) 		return -ENOENT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1248) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1249) 	ics = kvmppc_xics_find_ics(xics, irq, &idx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1250) 	if (!ics) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1251) 		ics = kvmppc_xics_create_ics(xics->kvm, xics, irq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1252) 		if (!ics)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1253) 			return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1254) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1255) 	irqp = &ics->irq_state[idx];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1256) 	if (get_user(val, ubufp))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1257) 		return -EFAULT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1258) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1259) 	server = val & KVM_XICS_DESTINATION_MASK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1260) 	prio = val >> KVM_XICS_PRIORITY_SHIFT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1261) 	if (prio != MASKED &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1262) 	    kvmppc_xics_find_server(xics->kvm, server) == NULL)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1263) 		return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1264) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1265) 	local_irq_save(flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1266) 	arch_spin_lock(&ics->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1267) 	irqp->server = server;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1268) 	irqp->saved_priority = prio;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1269) 	if (val & KVM_XICS_MASKED)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1270) 		prio = MASKED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1271) 	irqp->priority = prio;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1272) 	irqp->resend = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1273) 	irqp->masked_pending = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1274) 	irqp->lsi = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1275) 	irqp->pq_state = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1276) 	if (val & KVM_XICS_LEVEL_SENSITIVE)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1277) 		irqp->lsi = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1278) 	/* If PENDING, also set P, in case old code did not save P */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1279) 	if (val & KVM_XICS_PRESENTED || val & KVM_XICS_PENDING)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1280) 		irqp->pq_state |= PQ_PRESENTED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1281) 	if (val & KVM_XICS_QUEUED)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1282) 		irqp->pq_state |= PQ_QUEUED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1283) 	irqp->exists = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1284) 	arch_spin_unlock(&ics->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1285) 	local_irq_restore(flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1286) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1287) 	if (val & KVM_XICS_PENDING)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1288) 		icp_deliver_irq(xics, NULL, irqp->number, false);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1289) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1290) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1291) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1292) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1293) int kvmppc_xics_set_irq(struct kvm *kvm, int irq_source_id, u32 irq, int level,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1294) 			bool line_status)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1295) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1296) 	struct kvmppc_xics *xics = kvm->arch.xics;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1297) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1298) 	if (!xics)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1299) 		return -ENODEV;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1300) 	return ics_deliver_irq(xics, irq, level);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1301) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1302) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1303) static int xics_set_attr(struct kvm_device *dev, struct kvm_device_attr *attr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1304) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1305) 	struct kvmppc_xics *xics = dev->private;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1306) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1307) 	switch (attr->group) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1308) 	case KVM_DEV_XICS_GRP_SOURCES:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1309) 		return xics_set_source(xics, attr->attr, attr->addr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1310) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1311) 	return -ENXIO;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1312) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1313) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1314) static int xics_get_attr(struct kvm_device *dev, struct kvm_device_attr *attr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1315) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1316) 	struct kvmppc_xics *xics = dev->private;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1317) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1318) 	switch (attr->group) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1319) 	case KVM_DEV_XICS_GRP_SOURCES:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1320) 		return xics_get_source(xics, attr->attr, attr->addr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1321) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1322) 	return -ENXIO;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1323) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1324) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1325) static int xics_has_attr(struct kvm_device *dev, struct kvm_device_attr *attr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1326) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1327) 	switch (attr->group) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1328) 	case KVM_DEV_XICS_GRP_SOURCES:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1329) 		if (attr->attr >= KVMPPC_XICS_FIRST_IRQ &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1330) 		    attr->attr < KVMPPC_XICS_NR_IRQS)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1331) 			return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1332) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1333) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1334) 	return -ENXIO;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1335) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1336) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1337) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1338)  * Called when device fd is closed. kvm->lock is held.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1339)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1340) static void kvmppc_xics_release(struct kvm_device *dev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1341) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1342) 	struct kvmppc_xics *xics = dev->private;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1343) 	int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1344) 	struct kvm *kvm = xics->kvm;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1345) 	struct kvm_vcpu *vcpu;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1346) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1347) 	pr_devel("Releasing xics device\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1348) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1349) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1350) 	 * Since this is the device release function, we know that
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1351) 	 * userspace does not have any open fd referring to the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1352) 	 * device.  Therefore none of the device attribute set/get
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1353) 	 * functions can be executing concurrently, and similarly the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1354) 	 * connect_vcpu and set/clr_mapped functions cannot be
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1355) 	 * executing either.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1356) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1357) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1358) 	debugfs_remove(xics->dentry);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1359) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1360) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1361) 	 * Clean up the vCPU interrupt presenters first.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1362) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1363) 	kvm_for_each_vcpu(i, vcpu, kvm) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1364) 		/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1365) 		 * Take vcpu->mutex to ensure that no one_reg get/set ioctl
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1366) 		 * (i.e. kvmppc_xics_[gs]et_icp) can be done concurrently.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1367) 		 * Holding the vcpu->mutex also means that execution is
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1368) 		 * excluded for the vcpu until the ICP has been freed. When the vcpu
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1369) 		 * can execute again, vcpu->arch.icp and vcpu->arch.irq_type
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1370) 		 * have been cleared and the vcpu will not be going into the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1371) 		 * XICS code anymore.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1372) 		 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1373) 		mutex_lock(&vcpu->mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1374) 		kvmppc_xics_free_icp(vcpu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1375) 		mutex_unlock(&vcpu->mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1376) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1377) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1378) 	if (kvm)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1379) 		kvm->arch.xics = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1380) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1381) 	for (i = 0; i <= xics->max_icsid; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1382) 		kfree(xics->ics[i]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1383) 		xics->ics[i] = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1384) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1385) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1386) 	 * A reference to the kvmppc_xics pointer is now kept under
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1387) 	 * the xics_device pointer of the machine for reuse. For now
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1388) 	 * it is freed when the VM is destroyed, until we fix all the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1389) 	 * execution paths.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1390) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1391) 	kfree(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1392) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1393) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1394) static struct kvmppc_xics *kvmppc_xics_get_device(struct kvm *kvm)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1395) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1396) 	struct kvmppc_xics **kvm_xics_device = &kvm->arch.xics_device;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1397) 	struct kvmppc_xics *xics = *kvm_xics_device;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1398) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1399) 	if (!xics) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1400) 		xics = kzalloc(sizeof(*xics), GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1401) 		*kvm_xics_device = xics;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1402) 	} else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1403) 		memset(xics, 0, sizeof(*xics));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1404) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1405) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1406) 	return xics;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1407) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1408) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1409) static int kvmppc_xics_create(struct kvm_device *dev, u32 type)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1410) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1411) 	struct kvmppc_xics *xics;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1412) 	struct kvm *kvm = dev->kvm;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1413) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1414) 	pr_devel("Creating xics for partition\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1415) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1416) 	/* Already there? */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1417) 	if (kvm->arch.xics)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1418) 		return -EEXIST;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1419) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1420) 	xics = kvmppc_xics_get_device(kvm);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1421) 	if (!xics)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1422) 		return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1423) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1424) 	dev->private = xics;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1425) 	xics->dev = dev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1426) 	xics->kvm = kvm;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1427) 	kvm->arch.xics = xics;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1428) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1429) #ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1430) 	if (cpu_has_feature(CPU_FTR_ARCH_206) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1431) 	    cpu_has_feature(CPU_FTR_HVMODE)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1432) 		/* Enable real mode support */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1433) 		xics->real_mode = ENABLE_REALMODE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1434) 		xics->real_mode_dbg = DEBUG_REALMODE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1435) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1436) #endif /* CONFIG_KVM_BOOK3S_HV_POSSIBLE */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1437) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1438) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1439) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1440) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1441) static void kvmppc_xics_init(struct kvm_device *dev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1442) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1443) 	struct kvmppc_xics *xics = (struct kvmppc_xics *)dev->private;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1444) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1445) 	xics_debugfs_init(xics);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1446) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1447) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1448) struct kvm_device_ops kvm_xics_ops = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1449) 	.name = "kvm-xics",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1450) 	.create = kvmppc_xics_create,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1451) 	.init = kvmppc_xics_init,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1452) 	.release = kvmppc_xics_release,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1453) 	.set_attr = xics_set_attr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1454) 	.get_attr = xics_get_attr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1455) 	.has_attr = xics_has_attr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1456) };
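
/*
 * Userspace sketch (illustrative, not from this file) of how this
 * device is created and wired to a vcpu through the generic KVM
 * device API:
 *
 *	struct kvm_create_device cd = { .type = KVM_DEV_TYPE_XICS };
 *	ioctl(vm_fd, KVM_CREATE_DEVICE, &cd);	// cd.fd is the device
 *
 *	struct kvm_enable_cap cap = {
 *		.cap  = KVM_CAP_IRQ_XICS,
 *		.args = { cd.fd, server_num },
 *	};
 *	ioctl(vcpu_fd, KVM_ENABLE_CAP, &cap);
 *
 * The KVM_ENABLE_CAP path is what ends up calling
 * kvmppc_xics_connect_vcpu() below.
 */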
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1457) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1458) int kvmppc_xics_connect_vcpu(struct kvm_device *dev, struct kvm_vcpu *vcpu,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1459) 			     u32 xcpu)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1460) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1461) 	struct kvmppc_xics *xics = dev->private;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1462) 	int r = -EBUSY;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1463) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1464) 	if (dev->ops != &kvm_xics_ops)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1465) 		return -EPERM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1466) 	if (xics->kvm != vcpu->kvm)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1467) 		return -EPERM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1468) 	if (vcpu->arch.irq_type != KVMPPC_IRQ_DEFAULT)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1469) 		return -EBUSY;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1470) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1471) 	r = kvmppc_xics_create_icp(vcpu, xcpu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1472) 	if (!r)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1473) 		vcpu->arch.irq_type = KVMPPC_IRQ_XICS;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1474) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1475) 	return r;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1476) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1477) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1478) void kvmppc_xics_free_icp(struct kvm_vcpu *vcpu)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1479) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1480) 	if (!vcpu->arch.icp)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1481) 		return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1482) 	kfree(vcpu->arch.icp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1483) 	vcpu->arch.icp = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1484) 	vcpu->arch.irq_type = KVMPPC_IRQ_DEFAULT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1485) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1486) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1487) void kvmppc_xics_set_mapped(struct kvm *kvm, unsigned long irq,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1488) 			    unsigned long host_irq)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1489) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1490) 	struct kvmppc_xics *xics = kvm->arch.xics;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1491) 	struct kvmppc_ics *ics;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1492) 	u16 idx;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1493) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1494) 	ics = kvmppc_xics_find_ics(xics, irq, &idx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1495) 	if (!ics)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1496) 		return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1497) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1498) 	ics->irq_state[idx].host_irq = host_irq;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1499) 	ics->irq_state[idx].intr_cpu = -1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1500) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1501) EXPORT_SYMBOL_GPL(kvmppc_xics_set_mapped);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1502) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1503) void kvmppc_xics_clr_mapped(struct kvm *kvm, unsigned long irq,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1504) 			    unsigned long host_irq)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1505) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1506) 	struct kvmppc_xics *xics = kvm->arch.xics;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1507) 	struct kvmppc_ics *ics;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1508) 	u16 idx;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1509) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1510) 	ics = kvmppc_xics_find_ics(xics, irq, &idx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1511) 	if (!ics)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1512) 		return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1513) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1514) 	ics->irq_state[idx].host_irq = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1515) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1516) EXPORT_SYMBOL_GPL(kvmppc_xics_clr_mapped);