// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright 2017 Benjamin Herrenschmidt, IBM Corporation.
 */

#define pr_fmt(fmt) "xive-kvm: " fmt

#include <linux/kernel.h>
#include <linux/kvm_host.h>
#include <linux/err.h>
#include <linux/gfp.h>
#include <linux/spinlock.h>
#include <linux/delay.h>
#include <linux/percpu.h>
#include <linux/cpumask.h>
#include <linux/uaccess.h>
#include <asm/kvm_book3s.h>
#include <asm/kvm_ppc.h>
#include <asm/hvcall.h>
#include <asm/xics.h>
#include <asm/xive.h>
#include <asm/xive-regs.h>
#include <asm/debug.h>
#include <asm/debugfs.h>
#include <asm/time.h>
#include <asm/opal.h>

#include <linux/debugfs.h>
#include <linux/seq_file.h>

#include "book3s_xive.h"


/*
 * Virtual mode variants of the hcalls for use on radix/radix
 * with AIL. They require the VCPU's VP to be "pushed"
 *
 * We still instantiate them here because we use some of the
 * generated utility functions as well in this file.
 */
#define XIVE_RUNTIME_CHECKS
#define X_PFX xive_vm_
#define X_STATIC static
#define X_STAT_PFX stat_vm_
#define __x_tima xive_tima
#define __x_eoi_page(xd) ((void __iomem *)((xd)->eoi_mmio))
#define __x_trig_page(xd) ((void __iomem *)((xd)->trig_mmio))
#define __x_writeb __raw_writeb
#define __x_readw __raw_readw
#define __x_readq __raw_readq
#define __x_writeq __raw_writeq

#include "book3s_xive_template.c"
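/*
 * Note: with the macros above, the template include expands to the
 * virtual mode helpers used throughout this file (for instance
 * xive_vm_esb_load() and xive_vm_source_eoi() below), all going
 * through the regular MMU mappings of the ESB and TIMA pages.
 */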

/*
 * We leave a gap of a couple of interrupts in the queue to
 * account for the IPI and additional safety guard.
 */
#define XIVE_Q_GAP 2
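/* (the gap is applied in xive_try_pick_queue() below) */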

/*
 * Push a vcpu's context to the XIVE on guest entry.
 * This assumes we are in virtual mode (MMU on)
 */
void kvmppc_xive_push_vcpu(struct kvm_vcpu *vcpu)
{
	void __iomem *tima = local_paca->kvm_hstate.xive_tima_virt;
	u64 pq;

	/*
	 * Nothing to do if the platform doesn't have a XIVE
	 * or this vCPU doesn't have its own XIVE context
	 * (e.g. because it's not using an in-kernel interrupt controller).
	 */
	if (!tima || !vcpu->arch.xive_cam_word)
		return;

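	/*
	 * Push the cached OS context (word 0/1) and the CAM word into the
	 * OS ring of the TIMA and mark the vCPU as pushed.
	 */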
	eieio();
	__raw_writeq(vcpu->arch.xive_saved_state.w01, tima + TM_QW1_OS);
	__raw_writel(vcpu->arch.xive_cam_word, tima + TM_QW1_OS + TM_WORD2);
	vcpu->arch.xive_pushed = 1;
	eieio();

	/*
	 * We clear the irq_pending flag. There is a small chance of a
	 * race vs. the escalation interrupt happening on another
	 * processor setting it again, but the only consequence is to
	 * cause a spurious wakeup on the next H_CEDE, which is not an
	 * issue.
	 */
	vcpu->arch.irq_pending = 0;

	/*
	 * In single escalation mode, if the escalation interrupt is
	 * on, we mask it.
	 */
	if (vcpu->arch.xive_esc_on) {
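		/*
		 * Note: the ESB is managed with MMIO loads at special
		 * offsets; the load at XIVE_ESB_SET_PQ_01 below atomically
		 * sets PQ to 01 (masked) and returns the previous PQ value.
		 */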
		pq = __raw_readq((void __iomem *)(vcpu->arch.xive_esc_vaddr +
						  XIVE_ESB_SET_PQ_01));
		mb();

		/*
		 * We have a possible subtle race here: The escalation
		 * interrupt might have fired and be on its way to the
		 * host queue while we mask it, and if we unmask it
		 * early enough (re-cede right away), there is a
		 * theoretical possibility that it fires again, thus
		 * landing in the target queue more than once which is
		 * a big no-no.
		 *
		 * Fortunately, solving this is rather easy. If the
		 * above load setting PQ to 01 returns a previous
		 * value where P is set, then we know the escalation
		 * interrupt is somewhere on its way to the host. In
		 * that case we simply don't clear the xive_esc_on
		 * flag below. It will be eventually cleared by the
		 * handler for the escalation interrupt.
		 *
		 * Then, when doing a cede, we check that flag again
		 * before re-enabling the escalation interrupt, and if
		 * set, we abort the cede.
		 */
		if (!(pq & XIVE_ESB_VAL_P))
			/* Now P is 0, we can clear the flag */
			vcpu->arch.xive_esc_on = 0;
	}
}
EXPORT_SYMBOL_GPL(kvmppc_xive_push_vcpu);

/*
 * This is a simple trigger for a generic XIVE IRQ. This must
 * only be called for interrupts that support a trigger page
 */
static bool xive_irq_trigger(struct xive_irq_data *xd)
{
	/* This should be only for MSIs */
	if (WARN_ON(xd->flags & XIVE_IRQ_FLAG_LSI))
		return false;

	/* Those interrupts should always have a trigger page */
	if (WARN_ON(!xd->trig_mmio))
		return false;

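	/* A store to the trigger page fires the interrupt */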
	out_be64(xd->trig_mmio, 0);

	return true;
}

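/*
 * Host-side handler for a vCPU's escalation interrupt: flag that an
 * interrupt is pending for that vCPU and kick it out of cede so the
 * guest gets to process the event.
 */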
static irqreturn_t xive_esc_irq(int irq, void *data)
{
	struct kvm_vcpu *vcpu = data;

	vcpu->arch.irq_pending = 1;
	smp_mb();
	if (vcpu->arch.ceded)
		kvmppc_fast_vcpu_kick(vcpu);

	/* Since we have the no-EOI flag, the interrupt is effectively
	 * disabled now. Clearing xive_esc_on means we won't bother
	 * doing so on the next entry.
	 *
	 * This also allows the entry code to know that if a PQ combination
	 * of 10 is observed while xive_esc_on is true, it means the queue
	 * contains an unprocessed escalation interrupt. We don't make use of
	 * that knowledge today but might (see comment in book3s_hv_rmhandlers.S)
	 */
	vcpu->arch.xive_esc_on = false;

	/* This orders xive_esc_on = false vs. subsequent stale_p = true */
	smp_wmb(); /* goes with smp_mb() in cleanup_single_escalation */

	return IRQ_HANDLED;
}

int kvmppc_xive_attach_escalation(struct kvm_vcpu *vcpu, u8 prio,
				  bool single_escalation)
{
	struct kvmppc_xive_vcpu *xc = vcpu->arch.xive_vcpu;
	struct xive_q *q = &xc->queues[prio];
	char *name = NULL;
	int rc;

	/* Already there ? */
	if (xc->esc_virq[prio])
		return 0;

	/* Hook up the escalation interrupt */
	xc->esc_virq[prio] = irq_create_mapping(NULL, q->esc_irq);
	if (!xc->esc_virq[prio]) {
		pr_err("Failed to map escalation interrupt for queue %d of VCPU %d\n",
		       prio, xc->server_num);
		return -EIO;
	}

	if (single_escalation)
		name = kasprintf(GFP_KERNEL, "kvm-%d-%d",
				 vcpu->kvm->arch.lpid, xc->server_num);
	else
		name = kasprintf(GFP_KERNEL, "kvm-%d-%d-%d",
				 vcpu->kvm->arch.lpid, xc->server_num, prio);
	if (!name) {
		pr_err("Failed to allocate escalation irq name for queue %d of VCPU %d\n",
		       prio, xc->server_num);
		rc = -ENOMEM;
		goto error;
	}

	pr_devel("Escalation %s irq %d (prio %d)\n", name, xc->esc_virq[prio], prio);

	rc = request_irq(xc->esc_virq[prio], xive_esc_irq,
			 IRQF_NO_THREAD, name, vcpu);
	if (rc) {
		pr_err("Failed to request escalation interrupt for queue %d of VCPU %d\n",
		       prio, xc->server_num);
		goto error;
	}
	xc->esc_virq_names[prio] = name;

	/* In single escalation mode, we grab the ESB MMIO of the
	 * interrupt and mask it. Also populate the VCPU v/raddr
	 * of the ESB page for use by asm entry/exit code. Finally
	 * set the XIVE_IRQ_NO_EOI flag which will prevent the
	 * core code from performing an EOI on the escalation
	 * interrupt, thus leaving it effectively masked after
	 * it fires once.
	 */
	if (single_escalation) {
		struct irq_data *d = irq_get_irq_data(xc->esc_virq[prio]);
		struct xive_irq_data *xd = irq_data_get_irq_handler_data(d);

		xive_vm_esb_load(xd, XIVE_ESB_SET_PQ_01);
		vcpu->arch.xive_esc_raddr = xd->eoi_page;
		vcpu->arch.xive_esc_vaddr = (__force u64)xd->eoi_mmio;
		xd->flags |= XIVE_IRQ_NO_EOI;
	}

	return 0;
error:
	irq_dispose_mapping(xc->esc_virq[prio]);
	xc->esc_virq[prio] = 0;
	kfree(name);
	return rc;
}

static int xive_provision_queue(struct kvm_vcpu *vcpu, u8 prio)
{
	struct kvmppc_xive_vcpu *xc = vcpu->arch.xive_vcpu;
	struct kvmppc_xive *xive = xc->xive;
	struct xive_q *q = &xc->queues[prio];
	void *qpage;
	int rc;

	if (WARN_ON(q->qpage))
		return 0;

	/* Allocate the queue and retrieve info on the current node for now */
	qpage = (__be32 *)__get_free_pages(GFP_KERNEL, xive->q_page_order);
	if (!qpage) {
		pr_err("Failed to allocate queue %d for VCPU %d\n",
		       prio, xc->server_num);
		return -ENOMEM;
	}
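	/*
	 * xive->q_order is the log2 of the queue size in bytes (the pages
	 * themselves were allocated with the matching xive->q_page_order
	 * above), hence the 1 << q_order below.
	 */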
	memset(qpage, 0, 1 << xive->q_order);

	/*
	 * Reconfigure the queue. This will set q->qpage only once the
	 * queue is fully configured. This is a requirement for prio 0
	 * as we will stop doing EOIs for every IPI as soon as we observe
	 * qpage being non-NULL, and instead will only EOI when we receive
	 * corresponding queue 0 entries
	 */
	rc = xive_native_configure_queue(xc->vp_id, q, prio, qpage,
					 xive->q_order, true);
	if (rc)
		pr_err("Failed to configure queue %d for VCPU %d\n",
		       prio, xc->server_num);
	return rc;
}

/* Called with xive->lock held */
static int xive_check_provisioning(struct kvm *kvm, u8 prio)
{
	struct kvmppc_xive *xive = kvm->arch.xive;
	struct kvm_vcpu *vcpu;
	int i, rc;

	lockdep_assert_held(&xive->lock);

	/* Already provisioned ? */
	if (xive->qmap & (1 << prio))
		return 0;

	pr_devel("Provisioning prio... %d\n", prio);

	/* Provision each VCPU and enable escalations if needed */
	kvm_for_each_vcpu(i, vcpu, kvm) {
		if (!vcpu->arch.xive_vcpu)
			continue;
		rc = xive_provision_queue(vcpu, prio);
		if (rc == 0 && !xive->single_escalation)
			kvmppc_xive_attach_escalation(vcpu, prio,
						      xive->single_escalation);
		if (rc)
			return rc;
	}

	/* Order previous stores and mark it as provisioned */
	mb();
	xive->qmap |= (1 << prio);
	return 0;
}

static void xive_inc_q_pending(struct kvm *kvm, u32 server, u8 prio)
{
	struct kvm_vcpu *vcpu;
	struct kvmppc_xive_vcpu *xc;
	struct xive_q *q;

	/* Locate target server */
	vcpu = kvmppc_xive_find_server(kvm, server);
	if (!vcpu) {
		pr_warn("%s: Can't find server %d\n", __func__, server);
		return;
	}
	xc = vcpu->arch.xive_vcpu;
	if (WARN_ON(!xc))
		return;

	q = &xc->queues[prio];
	atomic_inc(&q->pending_count);
}

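/*
 * Account one more interrupt targeted at the given queue; fails with
 * -EBUSY once the queue is considered full, i.e. its size minus
 * XIVE_Q_GAP entries are already accounted for.
 */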
static int xive_try_pick_queue(struct kvm_vcpu *vcpu, u8 prio)
{
	struct kvmppc_xive_vcpu *xc = vcpu->arch.xive_vcpu;
	struct xive_q *q;
	u32 max;

	if (WARN_ON(!xc))
		return -ENXIO;
	if (!xc->valid)
		return -ENXIO;

	q = &xc->queues[prio];
	if (WARN_ON(!q->qpage))
		return -ENXIO;

	/* Calculate max number of interrupts in that queue. */
	max = (q->msk + 1) - XIVE_Q_GAP;
	return atomic_add_unless(&q->count, 1, max) ? 0 : -EBUSY;
}

int kvmppc_xive_select_target(struct kvm *kvm, u32 *server, u8 prio)
{
	struct kvm_vcpu *vcpu;
	int i, rc;

	/* Locate target server */
	vcpu = kvmppc_xive_find_server(kvm, *server);
	if (!vcpu) {
		pr_devel("Can't find server %d\n", *server);
		return -EINVAL;
	}

	pr_devel("Finding irq target on 0x%x/%d...\n", *server, prio);

	/* Try pick it */
	rc = xive_try_pick_queue(vcpu, prio);
	if (rc == 0)
		return rc;

	pr_devel(" .. failed, looking up candidate...\n");

	/* Failed, pick another VCPU */
	kvm_for_each_vcpu(i, vcpu, kvm) {
		if (!vcpu->arch.xive_vcpu)
			continue;
		rc = xive_try_pick_queue(vcpu, prio);
		if (rc == 0) {
			*server = vcpu->arch.xive_vcpu->server_num;
			pr_devel(" found on 0x%x/%d\n", *server, prio);
			return rc;
		}
	}
	pr_devel(" no available target !\n");

	/* No available target ! */
	return -EBUSY;
}

static u8 xive_lock_and_mask(struct kvmppc_xive *xive,
			     struct kvmppc_xive_src_block *sb,
			     struct kvmppc_xive_irq_state *state)
{
	struct xive_irq_data *xd;
	u32 hw_num;
	u8 old_prio;
	u64 val;

	/*
	 * Take the lock, set masked, try again if racing
	 * with H_EOI
	 */
	for (;;) {
		arch_spin_lock(&sb->lock);
		old_prio = state->guest_priority;
		state->guest_priority = MASKED;
		mb();
		if (!state->in_eoi)
			break;
		state->guest_priority = old_prio;
		arch_spin_unlock(&sb->lock);
	}

	/* No change ? Bail */
	if (old_prio == MASKED)
		return old_prio;

	/* Get the right irq */
	kvmppc_xive_select_irq(state, &hw_num, &xd);

	/*
	 * If the interrupt is marked as needing masking via
	 * firmware, we do it here. Firmware masking however
	 * is "lossy", it won't return the old p and q bits
	 * and won't set the interrupt to a state where it will
	 * record queued ones. If this is an issue we should do
	 * lazy masking instead.
	 *
	 * For now, we work around this in unmask by forcing
	 * an interrupt whenever we unmask a non-LSI via FW
	 * (if ever).
	 */
	if (xd->flags & OPAL_XIVE_IRQ_MASK_VIA_FW) {
		xive_native_configure_irq(hw_num,
					  kvmppc_xive_vp(xive, state->act_server),
					  MASKED, state->number);
		/* set old_p so we can track if an H_EOI was done */
		state->old_p = true;
		state->old_q = false;
	} else {
		/* Set PQ to 10, return old P and old Q and remember them */
		val = xive_vm_esb_load(xd, XIVE_ESB_SET_PQ_10);
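		/* Bit 1 of the returned value is the P bit, bit 0 is Q */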
		state->old_p = !!(val & 2);
		state->old_q = !!(val & 1);

		/*
		 * Synchronize hardware to ensure the queues are updated
		 * when masking
		 */
		xive_native_sync_source(hw_num);
	}

	return old_prio;
}

static void xive_lock_for_unmask(struct kvmppc_xive_src_block *sb,
				 struct kvmppc_xive_irq_state *state)
{
	/*
	 * Take the lock, try again if racing with H_EOI
	 */
	for (;;) {
		arch_spin_lock(&sb->lock);
		if (!state->in_eoi)
			break;
		arch_spin_unlock(&sb->lock);
	}
}

static void xive_finish_unmask(struct kvmppc_xive *xive,
			       struct kvmppc_xive_src_block *sb,
			       struct kvmppc_xive_irq_state *state,
			       u8 prio)
{
	struct xive_irq_data *xd;
	u32 hw_num;

	/* If we aren't changing a thing, move on */
	if (state->guest_priority != MASKED)
		goto bail;

	/* Get the right irq */
	kvmppc_xive_select_irq(state, &hw_num, &xd);

	/*
	 * See comment in xive_lock_and_mask() concerning masking
	 * via firmware.
	 */
	if (xd->flags & OPAL_XIVE_IRQ_MASK_VIA_FW) {
		xive_native_configure_irq(hw_num,
					  kvmppc_xive_vp(xive, state->act_server),
					  state->act_priority, state->number);
		/* If an EOI is needed, do it here */
		if (!state->old_p)
			xive_vm_source_eoi(hw_num, xd);
		/* If this is not an LSI, force a trigger */
		if (!(xd->flags & OPAL_XIVE_IRQ_LSI))
			xive_irq_trigger(xd);
		goto bail;
	}

	/* Old Q set, set PQ to 11 */
	if (state->old_q)
		xive_vm_esb_load(xd, XIVE_ESB_SET_PQ_11);

	/*
	 * If not old P, then perform an "effective" EOI on the
	 * source. This will handle the cases where FW EOI is
	 * needed.
	 */
	if (!state->old_p)
		xive_vm_source_eoi(hw_num, xd);

	/* Synchronize ordering and mark unmasked */
	mb();
bail:
	state->guest_priority = prio;
}

/*
 * Target an interrupt to a given server/prio; this will fall back
 * to another server if necessary and perform the HW targeting
 * updates as needed.
 *
 * NOTE: Must be called with the state lock held
 */
static int xive_target_interrupt(struct kvm *kvm,
				 struct kvmppc_xive_irq_state *state,
				 u32 server, u8 prio)
{
	struct kvmppc_xive *xive = kvm->arch.xive;
	u32 hw_num;
	int rc;

	/*
	 * This will return a tentative server and actual
	 * priority. The count for that new target will have
	 * already been incremented.
	 */
	rc = kvmppc_xive_select_target(kvm, &server, prio);

	/*
	 * We failed to find a target ? Not much we can do
	 * at least until we support the GIQ.
	 */
	if (rc)
		return rc;

	/*
	 * Increment the old queue pending count if there
	 * was one so that the old queue count gets adjusted later
	 * when observed to be empty.
	 */
	if (state->act_priority != MASKED)
		xive_inc_q_pending(kvm,
				   state->act_server,
				   state->act_priority);
	/*
	 * Update state and HW
	 */
	state->act_priority = prio;
	state->act_server = server;

	/* Get the right irq */
	kvmppc_xive_select_irq(state, &hw_num, NULL);

	return xive_native_configure_irq(hw_num,
					 kvmppc_xive_vp(xive, server),
					 prio, state->number);
}

/*
 * Targeting rules: In order to avoid losing track of
 * pending interrupts across mask and unmask, which would
 * allow queue overflows, we implement the following rules:
 *
 * - Unless it was never enabled (or we run out of capacity)
 *   an interrupt is always targeted at a valid server/queue
 *   pair even when "masked" by the guest. This pair tends to
 *   be the last one used but it can be changed under some
 *   circumstances. That allows us to separate targeting
 *   from masking, we only handle accounting during (re)targeting,
 *   this also allows us to let an interrupt drain into its target
 *   queue after masking, avoiding complex schemes to remove
 *   interrupts out of remote processor queues.
 *
 * - When masking, we set PQ to 10 and save the previous value
 *   of P and Q.
 *
 * - When unmasking, if saved Q was set, we set PQ to 11
 *   otherwise we leave PQ to the HW state which will be either
 *   10 if nothing happened or 11 if the interrupt fired while
 *   masked. Effectively we are OR'ing the previous Q into the
 *   HW Q.
 *
 *   Then if saved P is clear, we do an effective EOI (Q->P->Trigger)
 *   which will unmask the interrupt and shoot a new one if Q was
 *   set.
 *
 *   Otherwise (saved P is set) we leave PQ unchanged (so 10 or 11,
 *   effectively meaning an H_EOI from the guest is still expected
 *   for that interrupt).
 *
 * - If H_EOI occurs while masked, we clear the saved P.
 *
 * - When changing target, we account on the new target and
 *   increment a separate "pending" counter on the old one.
 *   This pending counter will be used to decrement the old
 *   target's count when its queue has been observed empty.
 */
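
/*
 * Worked example of the rules above: an interrupt masked while at
 * PQ=00 records old_p=0/old_q=0 and sits at PQ=10. If it fires while
 * masked, the HW moves it to 11. On unmask, saved Q is clear so PQ is
 * left as is, and since saved P is clear we do the effective EOI,
 * which re-enables the source and re-fires the interrupt because Q
 * was found set, so the guest doesn't lose it.
 */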

int kvmppc_xive_set_xive(struct kvm *kvm, u32 irq, u32 server,
			 u32 priority)
{
	struct kvmppc_xive *xive = kvm->arch.xive;
	struct kvmppc_xive_src_block *sb;
	struct kvmppc_xive_irq_state *state;
	u8 new_act_prio;
	int rc = 0;
	u16 idx;

	if (!xive)
		return -ENODEV;

	pr_devel("set_xive ! irq 0x%x server 0x%x prio %d\n",
		 irq, server, priority);

	/* First, check provisioning of queues */
	if (priority != MASKED) {
		mutex_lock(&xive->lock);
		rc = xive_check_provisioning(xive->kvm,
					     xive_prio_from_guest(priority));
		mutex_unlock(&xive->lock);
	}
	if (rc) {
		pr_devel(" provisioning failure %d !\n", rc);
		return rc;
	}

	sb = kvmppc_xive_find_source(xive, irq, &idx);
	if (!sb)
		return -EINVAL;
	state = &sb->irq_state[idx];

	/*
	 * We first handle masking/unmasking since the locking
	 * might need to be retried due to EOIs; we'll handle
	 * targeting changes later. These functions will return
	 * with the SB lock held.
	 *
	 * xive_lock_and_mask() will also set state->guest_priority
	 * but won't otherwise change other fields of the state.
	 *
	 * xive_lock_for_unmask will not actually unmask, this will
	 * be done later by xive_finish_unmask() once the targeting
	 * has been done, so we don't try to unmask an interrupt
	 * that hasn't yet been targeted.
	 */
	if (priority == MASKED)
		xive_lock_and_mask(xive, sb, state);
	else
		xive_lock_for_unmask(sb, state);


	/*
	 * Then we handle targeting.
	 *
	 * First calculate a new "actual priority"
	 */
	new_act_prio = state->act_priority;
	if (priority != MASKED)
		new_act_prio = xive_prio_from_guest(priority);

	pr_devel(" new_act_prio=%x act_server=%x act_prio=%x\n",
		 new_act_prio, state->act_server, state->act_priority);

	/*
	 * Then check if we actually need to change anything,
	 *
	 * The condition for re-targeting the interrupt is that
	 * we have a valid new priority (new_act_prio is not 0xff)
	 * and either the server or the priority changed.
	 *
	 * Note: If act_priority was ff and the new priority is
	 * also ff, we don't do anything and leave the interrupt
	 * untargeted. An attempt to do an int_on on an
	 * untargeted interrupt will fail. If that is a problem
	 * we could initialize interrupts with a valid default
	 * priority.
	 */

	if (new_act_prio != MASKED &&
	    (state->act_server != server ||
	     state->act_priority != new_act_prio))
		rc = xive_target_interrupt(kvm, state, server, new_act_prio);

	/*
	 * Perform the final unmasking of the interrupt source
	 * if necessary
	 */
	if (priority != MASKED)
		xive_finish_unmask(xive, sb, state, priority);

	/*
	 * Finally, update saved_priority to match. Only int_on/off
	 * set this field to a different value.
	 */
	state->saved_priority = priority;

	arch_spin_unlock(&sb->lock);
	return rc;
}

int kvmppc_xive_get_xive(struct kvm *kvm, u32 irq, u32 *server,
			 u32 *priority)
{
	struct kvmppc_xive *xive = kvm->arch.xive;
	struct kvmppc_xive_src_block *sb;
	struct kvmppc_xive_irq_state *state;
	u16 idx;

	if (!xive)
		return -ENODEV;

	sb = kvmppc_xive_find_source(xive, irq, &idx);
	if (!sb)
		return -EINVAL;
	state = &sb->irq_state[idx];
	arch_spin_lock(&sb->lock);
	*server = state->act_server;
	*priority = state->guest_priority;
	arch_spin_unlock(&sb->lock);

	return 0;
}

int kvmppc_xive_int_on(struct kvm *kvm, u32 irq)
{
	struct kvmppc_xive *xive = kvm->arch.xive;
	struct kvmppc_xive_src_block *sb;
	struct kvmppc_xive_irq_state *state;
	u16 idx;

	if (!xive)
		return -ENODEV;

	sb = kvmppc_xive_find_source(xive, irq, &idx);
	if (!sb)
		return -EINVAL;
	state = &sb->irq_state[idx];

	pr_devel("int_on(irq=0x%x)\n", irq);

	/*
	 * Check if the interrupt was not targeted
	 */
	if (state->act_priority == MASKED) {
		pr_devel("int_on on untargeted interrupt\n");
		return -EINVAL;
	}

	/* If saved_priority is 0xff, do nothing */
	if (state->saved_priority == MASKED)
		return 0;

	/*
	 * Lock and unmask it.
	 */
	xive_lock_for_unmask(sb, state);
	xive_finish_unmask(xive, sb, state, state->saved_priority);
	arch_spin_unlock(&sb->lock);

	return 0;
}

int kvmppc_xive_int_off(struct kvm *kvm, u32 irq)
{
	struct kvmppc_xive *xive = kvm->arch.xive;
	struct kvmppc_xive_src_block *sb;
	struct kvmppc_xive_irq_state *state;
	u16 idx;

	if (!xive)
		return -ENODEV;

	sb = kvmppc_xive_find_source(xive, irq, &idx);
	if (!sb)
		return -EINVAL;
	state = &sb->irq_state[idx];

	pr_devel("int_off(irq=0x%x)\n", irq);

	/*
	 * Lock and mask
	 */
	state->saved_priority = xive_lock_and_mask(xive, sb, state);
	arch_spin_unlock(&sb->lock);

	return 0;
}

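/*
 * Re-fire the IPI of a valid source when restoring state so that an
 * interrupt which was recorded as pending gets re-injected.
 */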
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 802) static bool xive_restore_pending_irq(struct kvmppc_xive *xive, u32 irq)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 803) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 804) struct kvmppc_xive_src_block *sb;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 805) struct kvmppc_xive_irq_state *state;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 806) u16 idx;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 807)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 808) sb = kvmppc_xive_find_source(xive, irq, &idx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 809) if (!sb)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 810) return false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 811) state = &sb->irq_state[idx];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 812) if (!state->valid)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 813) return false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 814)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 815) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 816) * Trigger the IPI. This assumes we never restore a pass-through
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 817) * interrupt, which should be safe enough.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 818) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 819) xive_irq_trigger(&state->ipi_data);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 820)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 821) return true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 822) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 823)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 824) u64 kvmppc_xive_get_icp(struct kvm_vcpu *vcpu)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 825) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 826) struct kvmppc_xive_vcpu *xc = vcpu->arch.xive_vcpu;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 827)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 828) if (!xc)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 829) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 830)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 831) /* Return the per-cpu state for state saving/migration */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 832) return (u64)xc->cppr << KVM_REG_PPC_ICP_CPPR_SHIFT |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 833) (u64)xc->mfrr << KVM_REG_PPC_ICP_MFRR_SHIFT |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 834) (u64)0xff << KVM_REG_PPC_ICP_PPRI_SHIFT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 835) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 836)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 837) int kvmppc_xive_set_icp(struct kvm_vcpu *vcpu, u64 icpval)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 838) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 839) struct kvmppc_xive_vcpu *xc = vcpu->arch.xive_vcpu;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 840) struct kvmppc_xive *xive = vcpu->kvm->arch.xive;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 841) u8 cppr, mfrr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 842) u32 xisr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 843)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 844) if (!xc || !xive)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 845) return -ENOENT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 846)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 847) /* Grab individual state fields. We don't use pending_pri */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 848) cppr = icpval >> KVM_REG_PPC_ICP_CPPR_SHIFT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 849) xisr = (icpval >> KVM_REG_PPC_ICP_XISR_SHIFT) &
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 850) KVM_REG_PPC_ICP_XISR_MASK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 851) mfrr = icpval >> KVM_REG_PPC_ICP_MFRR_SHIFT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 852)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 853) pr_devel("set_icp vcpu %d cppr=0x%x mfrr=0x%x xisr=0x%x\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 854) xc->server_num, cppr, mfrr, xisr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 855)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 856) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 857) * We can't update the state of a "pushed" VCPU, but that
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 858) * shouldn't happen because the vcpu->mutex makes running a
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 859) * vcpu mutually exclusive with doing one_reg get/set on it.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 860) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 861) if (WARN_ON(vcpu->arch.xive_pushed))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 862) return -EIO;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 863)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 864) /* Update VCPU HW saved state */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 865) vcpu->arch.xive_saved_state.cppr = cppr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 866) xc->hw_cppr = xc->cppr = cppr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 867)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 868) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 869) * Update MFRR state. If the new MFRR is more favored (lower)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 870) * than the current CPPR, we trigger the VP IPI so the target
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 871) * gets re-evaluated. The VCPU will thus potentially get a
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 872) * spurious interrupt but that's not a big deal.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 873) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 874) xc->mfrr = mfrr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 875) if (mfrr < cppr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 876) xive_irq_trigger(&xc->vp_ipi_data);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 877)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 878) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 879) * Now saved XIRR is "interesting". It means there's something in
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 880) * the legacy "1 element" queue... for an IPI we simply ignore it,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 881) * as the MFRR restore will handle that. For anything else we need
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 882) * to force a resend of the source.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 883) * However, the source may not have been set up yet. If that's the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 884) * case, we keep that info and increment a counter in the xive to
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 885) * tell subsequent xive_set_source() to go look.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 886) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 887) if (xisr > XICS_IPI && !xive_restore_pending_irq(xive, xisr)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 888) xc->delayed_irq = xisr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 889) xive->delayed_irqs++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 890) pr_devel(" xisr restore delayed\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 891) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 892)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 893) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 894) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 895)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 896) int kvmppc_xive_set_mapped(struct kvm *kvm, unsigned long guest_irq,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 897) struct irq_desc *host_desc)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 898) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 899) struct kvmppc_xive *xive = kvm->arch.xive;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 900) struct kvmppc_xive_src_block *sb;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 901) struct kvmppc_xive_irq_state *state;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 902) struct irq_data *host_data = irq_desc_get_irq_data(host_desc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 903) unsigned int host_irq = irq_desc_get_irq(host_desc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 904) unsigned int hw_irq = (unsigned int)irqd_to_hwirq(host_data);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 905) u16 idx;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 906) u8 prio;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 907) int rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 908)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 909) if (!xive)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 910) return -ENODEV;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 911)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 912) pr_devel("set_mapped girq 0x%lx host HW irq 0x%x...\n", guest_irq, hw_irq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 913)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 914) sb = kvmppc_xive_find_source(xive, guest_irq, &idx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 915) if (!sb)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 916) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 917) state = &sb->irq_state[idx];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 918)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 919) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 920) * Mark the passed-through interrupt as going to a VCPU;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 921) * this will prevent further EOIs and similar operations
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 922) * from the XIVE code. It will also mask the interrupt
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 923) * to either PQ=10 or 11 state, the latter if the interrupt
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 924) * is pending. This will allow us to unmask or retrigger it
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 925) * after routing it to the guest with a simple EOI.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 926) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 927) * The "state" argument is a "token"; all it needs is to be
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 928) * non-NULL to switch to passed-through or NULL for the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 929) * other way around. We may not yet have an actual VCPU
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 930) * target here and we don't really care.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 931) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 932) rc = irq_set_vcpu_affinity(host_irq, state);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 933) if (rc) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 934) pr_err("Failed to set VCPU affinity for irq %d\n", host_irq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 935) return rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 936) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 937)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 938) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 939) * Mask and read state of IPI. We need to know if its P bit
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 940) * is set, as that means it's potentially already using a
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 941) * queue entry in the target.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 942) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 943) prio = xive_lock_and_mask(xive, sb, state);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 944) pr_devel(" old IPI prio %02x P:%d Q:%d\n", prio,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 945) state->old_p, state->old_q);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 946)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 947) /* Turn the IPI hard off */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 948) xive_vm_esb_load(&state->ipi_data, XIVE_ESB_SET_PQ_01);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 949)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 950) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 951) * Reset ESB guest mapping. Needed when ESB pages are exposed
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 952) * to the guest in XIVE native mode
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 953) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 954) if (xive->ops && xive->ops->reset_mapped)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 955) xive->ops->reset_mapped(kvm, guest_irq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 956)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 957) /* Grab info about irq */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 958) state->pt_number = hw_irq;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 959) state->pt_data = irq_data_get_irq_handler_data(host_data);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 960)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 961) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 962) * Configure the IRQ to match the existing configuration of
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 963) * the IPI if it was already targeted. Otherwise this will
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 964) * mask the interrupt in a lossy way (act_priority is 0xff)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 965) * which is fine for a never started interrupt.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 966) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 967) xive_native_configure_irq(hw_irq,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 968) kvmppc_xive_vp(xive, state->act_server),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 969) state->act_priority, state->number);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 970)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 971) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 972) * We do an EOI to enable the interrupt (and retrigger if needed)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 973) * if the guest has the interrupt unmasked and the P bit was *not*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 974) * set in the IPI. If it was set, we know a slot may still be in
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 975) * use in the target queue, so we have to wait for a
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 976) * guest-originated EOI.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 977) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 978) if (prio != MASKED && !state->old_p)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 979) xive_vm_source_eoi(hw_irq, state->pt_data);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 980)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 981) /* Clear old_p/old_q as they are no longer relevant */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 982) state->old_p = state->old_q = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 983)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 984) /* Restore guest prio (unlocks EOI) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 985) mb();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 986) state->guest_priority = prio;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 987) arch_spin_unlock(&sb->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 988)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 989) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 990) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 991) EXPORT_SYMBOL_GPL(kvmppc_xive_set_mapped);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 992)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 993) int kvmppc_xive_clr_mapped(struct kvm *kvm, unsigned long guest_irq,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 994) struct irq_desc *host_desc)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 995) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 996) struct kvmppc_xive *xive = kvm->arch.xive;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 997) struct kvmppc_xive_src_block *sb;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 998) struct kvmppc_xive_irq_state *state;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 999) unsigned int host_irq = irq_desc_get_irq(host_desc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1000) u16 idx;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1001) u8 prio;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1002) int rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1003)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1004) if (!xive)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1005) return -ENODEV;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1006)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1007) pr_devel("clr_mapped girq 0x%lx...\n", guest_irq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1008)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1009) sb = kvmppc_xive_find_source(xive, guest_irq, &idx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1010) if (!sb)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1011) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1012) state = &sb->irq_state[idx];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1013)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1014) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1015) * Mask and read state of IRQ. We need to know if its P bit
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1016) * is set, as that means it's potentially already using a
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1017) * queue entry in the target.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1018) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1019) prio = xive_lock_and_mask(xive, sb, state);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1020) pr_devel(" old IRQ prio %02x P:%d Q:%d\n", prio,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1021) state->old_p, state->old_q);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1022)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1023) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1024) * If old_p is set, the interrupt is pending, so we switch it to
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1025) * PQ=11. This will force a resend in the host so the interrupt
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1026) * isn't lost to whatever host driver may pick it up.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1027) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1028) if (state->old_p)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1029) xive_vm_esb_load(state->pt_data, XIVE_ESB_SET_PQ_11);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1030)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1031) /* Release the passed-through interrupt to the host */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1032) rc = irq_set_vcpu_affinity(host_irq, NULL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1033) if (rc) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1034) pr_err("Failed to clr VCPU affinity for irq %d\n", host_irq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1035) return rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1036) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1037)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1038) /* Forget about the IRQ */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1039) state->pt_number = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1040) state->pt_data = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1041)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1042) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1043) * Reset ESB guest mapping. Needed when ESB pages are exposed
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1044) * to the guest in XIVE native mode
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1045) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1046) if (xive->ops && xive->ops->reset_mapped) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1047) xive->ops->reset_mapped(kvm, guest_irq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1048) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1049)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1050) /* Reconfigure the IPI */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1051) xive_native_configure_irq(state->ipi_number,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1052) kvmppc_xive_vp(xive, state->act_server),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1053) state->act_priority, state->number);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1054)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1055) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1056) * If old_p is set (we have a queue entry potentially
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1057) * occupied) or the interrupt is masked, we set the IPI
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1058) * to PQ=10 state. Otherwise we just re-enable it (PQ=00).
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1059) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1060) if (prio == MASKED || state->old_p)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1061) xive_vm_esb_load(&state->ipi_data, XIVE_ESB_SET_PQ_10);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1062) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1063) xive_vm_esb_load(&state->ipi_data, XIVE_ESB_SET_PQ_00);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1064)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1065) /* Restore guest prio (unlocks EOI) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1066) mb();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1067) state->guest_priority = prio;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1068) arch_spin_unlock(&sb->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1069)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1070) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1071) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1072) EXPORT_SYMBOL_GPL(kvmppc_xive_clr_mapped);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1073)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1074) void kvmppc_xive_disable_vcpu_interrupts(struct kvm_vcpu *vcpu)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1075) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1076) struct kvmppc_xive_vcpu *xc = vcpu->arch.xive_vcpu;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1077) struct kvm *kvm = vcpu->kvm;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1078) struct kvmppc_xive *xive = kvm->arch.xive;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1079) int i, j;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1080)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1081) for (i = 0; i <= xive->max_sbid; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1082) struct kvmppc_xive_src_block *sb = xive->src_blocks[i];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1083)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1084) if (!sb)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1085) continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1086) for (j = 0; j < KVMPPC_XICS_IRQ_PER_ICS; j++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1087) struct kvmppc_xive_irq_state *state = &sb->irq_state[j];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1088)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1089) if (!state->valid)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1090) continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1091) if (state->act_priority == MASKED)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1092) continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1093) if (state->act_server != xc->server_num)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1094) continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1095)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1096) /* Clean it up */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1097) arch_spin_lock(&sb->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1098) state->act_priority = MASKED;
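			/*
			 * PQ=01 masks the source in HW, and the MASKED
			 * configuration detaches it from this vCPU's VP.
			 */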
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1099) xive_vm_esb_load(&state->ipi_data, XIVE_ESB_SET_PQ_01);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1100) xive_native_configure_irq(state->ipi_number, 0, MASKED, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1101) if (state->pt_number) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1102) xive_vm_esb_load(state->pt_data, XIVE_ESB_SET_PQ_01);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1103) xive_native_configure_irq(state->pt_number, 0, MASKED, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1104) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1105) arch_spin_unlock(&sb->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1106) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1107) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1108)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1109) /* Disable vcpu's escalation interrupt */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1110) if (vcpu->arch.xive_esc_on) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1111) __raw_readq((void __iomem *)(vcpu->arch.xive_esc_vaddr +
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1112) XIVE_ESB_SET_PQ_01));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1113) vcpu->arch.xive_esc_on = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1114) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1115)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1116) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1117) * Clear pointers to escalation interrupt ESB.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1118) * This is safe because the vcpu->mutex is held, preventing
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1119) * any other CPU from concurrently executing a KVM_RUN ioctl.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1120) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1121) vcpu->arch.xive_esc_vaddr = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1122) vcpu->arch.xive_esc_raddr = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1123) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1124)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1125) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1126) * In single escalation mode, the escalation interrupt is marked so
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1127) * that EOI doesn't re-enable it, but just sets the stale_p flag to
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1128) * indicate that the P bit has already been dealt with. However, the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1129) * assembly code that enters the guest sets PQ to 00 without clearing
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1130) * stale_p (because it has no easy way to address it). Hence we have
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1131) * to adjust stale_p before shutting down the interrupt.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1132) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1133) void xive_cleanup_single_escalation(struct kvm_vcpu *vcpu,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1134) struct kvmppc_xive_vcpu *xc, int irq)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1135) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1136) struct irq_data *d = irq_get_irq_data(irq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1137) struct xive_irq_data *xd = irq_data_get_irq_handler_data(d);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1138)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1139) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1140) * This slightly odd sequence gives the right result
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1141) * (i.e. stale_p set if xive_esc_on is false) even if
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1142) * we race with xive_esc_irq() and xive_irq_eoi().
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1143) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1144) xd->stale_p = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1145) smp_mb(); /* paired with smp_wmb in xive_esc_irq */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1146) if (!vcpu->arch.xive_esc_on)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1147) xd->stale_p = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1148) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1149)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1150) void kvmppc_xive_cleanup_vcpu(struct kvm_vcpu *vcpu)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1151) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1152) struct kvmppc_xive_vcpu *xc = vcpu->arch.xive_vcpu;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1153) struct kvmppc_xive *xive = vcpu->kvm->arch.xive;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1154) int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1155)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1156) if (!kvmppc_xics_enabled(vcpu))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1157) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1158)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1159) if (!xc)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1160) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1161)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1162) pr_devel("cleanup_vcpu(cpu=%d)\n", xc->server_num);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1163)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1164) /* Ensure no interrupt is still routed to that VP */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1165) xc->valid = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1166) kvmppc_xive_disable_vcpu_interrupts(vcpu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1167)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1168) /* Mask the VP IPI */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1169) xive_vm_esb_load(&xc->vp_ipi_data, XIVE_ESB_SET_PQ_01);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1170)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1171) /* Free escalations */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1172) for (i = 0; i < KVMPPC_XIVE_Q_COUNT; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1173) if (xc->esc_virq[i]) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1174) if (xc->xive->single_escalation)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1175) xive_cleanup_single_escalation(vcpu, xc,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1176) xc->esc_virq[i]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1177) free_irq(xc->esc_virq[i], vcpu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1178) irq_dispose_mapping(xc->esc_virq[i]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1179) kfree(xc->esc_virq_names[i]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1180) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1181) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1182)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1183) /* Disable the VP */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1184) xive_native_disable_vp(xc->vp_id);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1185)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1186) /* Clear the cam word so guest entry won't try to push context */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1187) vcpu->arch.xive_cam_word = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1188)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1189) /* Free the queues */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1190) for (i = 0; i < KVMPPC_XIVE_Q_COUNT; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1191) struct xive_q *q = &xc->queues[i];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1192)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1193) xive_native_disable_queue(xc->vp_id, q, i);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1194) if (q->qpage) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1195) free_pages((unsigned long)q->qpage,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1196) xive->q_page_order);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1197) q->qpage = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1198) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1199) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1200)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1201) /* Free the IPI */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1202) if (xc->vp_ipi) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1203) xive_cleanup_irq_data(&xc->vp_ipi_data);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1204) xive_native_free_irq(xc->vp_ipi);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1205) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1206) /* Free the VP */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1207) kfree(xc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1208)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1209) /* Cleanup the vcpu */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1210) vcpu->arch.irq_type = KVMPPC_IRQ_DEFAULT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1211) vcpu->arch.xive_vcpu = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1212) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1213)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1214) static bool kvmppc_xive_vcpu_id_valid(struct kvmppc_xive *xive, u32 cpu)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1215) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1216) /* We have a block of xive->nr_servers VPs. We just need to check
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1217) * that packed vCPU ids are below that.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1218) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1219) return kvmppc_pack_vcpu_id(xive->kvm, cpu) < xive->nr_servers;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1220) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1221)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1222) int kvmppc_xive_compute_vp_id(struct kvmppc_xive *xive, u32 cpu, u32 *vp)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1223) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1224) u32 vp_id;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1225)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1226) if (!kvmppc_xive_vcpu_id_valid(xive, cpu)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1227) pr_devel("Out of bounds !\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1228) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1229) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1230)
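	/* The VP block is allocated lazily, the first time a vCPU needs it */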
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1231) if (xive->vp_base == XIVE_INVALID_VP) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1232) xive->vp_base = xive_native_alloc_vp_block(xive->nr_servers);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1233) pr_devel("VP_Base=%x nr_servers=%d\n", xive->vp_base, xive->nr_servers);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1234)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1235) if (xive->vp_base == XIVE_INVALID_VP)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1236) return -ENOSPC;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1237) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1238)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1239) vp_id = kvmppc_xive_vp(xive, cpu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1240) if (kvmppc_xive_vp_in_use(xive->kvm, vp_id)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1241) pr_devel("Duplicate !\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1242) return -EEXIST;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1243) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1244)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1245) *vp = vp_id;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1246)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1247) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1248) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1249)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1250) int kvmppc_xive_connect_vcpu(struct kvm_device *dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1251) struct kvm_vcpu *vcpu, u32 cpu)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1252) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1253) struct kvmppc_xive *xive = dev->private;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1254) struct kvmppc_xive_vcpu *xc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1255) int i, r = -EBUSY;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1256) u32 vp_id;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1257)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1258) pr_devel("connect_vcpu(cpu=%d)\n", cpu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1259)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1260) if (dev->ops != &kvm_xive_ops) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1261) pr_devel("Wrong ops !\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1262) return -EPERM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1263) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1264) if (xive->kvm != vcpu->kvm)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1265) return -EPERM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1266) if (vcpu->arch.irq_type != KVMPPC_IRQ_DEFAULT)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1267) return -EBUSY;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1268)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1269) /* We need to synchronize with queue provisioning */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1270) mutex_lock(&xive->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1271)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1272) r = kvmppc_xive_compute_vp_id(xive, cpu, &vp_id);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1273) if (r)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1274) goto bail;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1275)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1276) xc = kzalloc(sizeof(*xc), GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1277) if (!xc) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1278) r = -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1279) goto bail;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1280) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1281)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1282) vcpu->arch.xive_vcpu = xc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1283) xc->xive = xive;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1284) xc->vcpu = vcpu;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1285) xc->server_num = cpu;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1286) xc->vp_id = vp_id;
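	/* An MFRR of 0xff means no IPI is being requested */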
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1287) xc->mfrr = 0xff;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1288) xc->valid = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1289)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1290) r = xive_native_get_vp_info(xc->vp_id, &xc->vp_cam, &xc->vp_chip_id);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1291) if (r)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1292) goto bail;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1293)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1294) /* Configure VCPU fields for use by assembly push/pull */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1295) vcpu->arch.xive_saved_state.w01 = cpu_to_be64(0xff000000);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1296) vcpu->arch.xive_cam_word = cpu_to_be32(xc->vp_cam | TM_QW1W2_VO);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1297)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1298) /* Allocate IPI */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1299) xc->vp_ipi = xive_native_alloc_irq();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1300) if (!xc->vp_ipi) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1301) pr_err("Failed to allocate xive irq for VCPU IPI\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1302) r = -EIO;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1303) goto bail;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1304) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1305) pr_devel(" IPI=0x%x\n", xc->vp_ipi);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1306)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1307) r = xive_native_populate_irq_data(xc->vp_ipi, &xc->vp_ipi_data);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1308) if (r)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1309) goto bail;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1310)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1311) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1312) * Enable the VP first as the single escalation mode will
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1313) * affect the escalation interrupt numbering
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1314) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1315) r = xive_native_enable_vp(xc->vp_id, xive->single_escalation);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1316) if (r) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1317) pr_err("Failed to enable VP in OPAL, err %d\n", r);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1318) goto bail;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1319) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1320)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1321) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1322) * Initialize queues. Initially we set them all for no queueing
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1323) * and we enable escalation for queue 0 only, which we'll use for
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1324) * our MFRR change notifications. If the VCPU is hot-plugged,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1325) * however, we do handle provisioning based on the existing "map"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1326) * of enabled queues.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1327) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1328) for (i = 0; i < KVMPPC_XIVE_Q_COUNT; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1329) struct xive_q *q = &xc->queues[i];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1330)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1331) /* Single escalation, no queue 7 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1332) if (i == 7 && xive->single_escalation)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1333) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1334)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1335) /* Is the queue already enabled? Provision it */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1336) if (xive->qmap & (1 << i)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1337) r = xive_provision_queue(vcpu, i);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1338) if (r == 0 && !xive->single_escalation)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1339) kvmppc_xive_attach_escalation(
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1340) vcpu, i, xive->single_escalation);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1341) if (r)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1342) goto bail;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1343) } else {
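			/* Not provisioned yet: configure the queue as empty for now */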
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1344) r = xive_native_configure_queue(xc->vp_id,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1345) q, i, NULL, 0, true);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1346) if (r) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1347) pr_err("Failed to configure queue %d for VCPU %d\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1348) i, cpu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1349) goto bail;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1350) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1351) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1352) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1353)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1354) /* If not done above, attach priority 0 escalation */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1355) r = kvmppc_xive_attach_escalation(vcpu, 0, xive->single_escalation);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1356) if (r)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1357) goto bail;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1358)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1359) /* Route the IPI */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1360) r = xive_native_configure_irq(xc->vp_ipi, xc->vp_id, 0, XICS_IPI);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1361) if (!r)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1362) xive_vm_esb_load(&xc->vp_ipi_data, XIVE_ESB_SET_PQ_00);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1363)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1364) bail:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1365) mutex_unlock(&xive->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1366) if (r) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1367) kvmppc_xive_cleanup_vcpu(vcpu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1368) return r;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1369) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1370)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1371) vcpu->arch.irq_type = KVMPPC_IRQ_XICS;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1372) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1373) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1374)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1375) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1376) * Scanning of queues before/after migration save
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1377) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1378) static void xive_pre_save_set_queued(struct kvmppc_xive *xive, u32 irq)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1379) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1380) struct kvmppc_xive_src_block *sb;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1381) struct kvmppc_xive_irq_state *state;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1382) u16 idx;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1383)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1384) sb = kvmppc_xive_find_source(xive, irq, &idx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1385) if (!sb)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1386) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1387)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1388) state = &sb->irq_state[idx];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1389)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1390) /* Some sanity checking */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1391) if (!state->valid) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1392) pr_err("invalid irq 0x%x in cpu queue!\n", irq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1393) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1394) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1395)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1396) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1397) * If the interrupt is in a queue it should have P set.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1398) * We warn so that it gets reported. A backtrace isn't useful
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1399) * so no need to use a WARN_ON.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1400) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1401) if (!state->saved_p)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1402) pr_err("Interrupt 0x%x is marked in a queue but P not set !\n", irq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1403)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1404) /* Set flag */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1405) state->in_queue = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1406) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1407)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1408) static void xive_pre_save_mask_irq(struct kvmppc_xive *xive,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1409) struct kvmppc_xive_src_block *sb,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1410) u32 irq)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1411) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1412) struct kvmppc_xive_irq_state *state = &sb->irq_state[irq];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1413)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1414) if (!state->valid)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1415) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1416)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1417) /* Mask and save state, this will also sync HW queues */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1418) state->saved_scan_prio = xive_lock_and_mask(xive, sb, state);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1419)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1420) /* Transfer P and Q */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1421) state->saved_p = state->old_p;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1422) state->saved_q = state->old_q;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1423)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1424) /* Unlock */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1425) arch_spin_unlock(&sb->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1426) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1427)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1428) static void xive_pre_save_unmask_irq(struct kvmppc_xive *xive,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1429) struct kvmppc_xive_src_block *sb,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1430) u32 irq)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1431) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1432) struct kvmppc_xive_irq_state *state = &sb->irq_state[irq];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1433)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1434) if (!state->valid)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1435) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1436)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1437) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1438) * Lock / exclude EOI (not technically necessary if the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1439) * guest isn't running concurrently). If this becomes a
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1440) * performance issue we can probably remove the lock.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1441) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1442) xive_lock_for_unmask(sb, state);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1443)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1444) /* Restore mask/prio if it wasn't masked */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1445) if (state->saved_scan_prio != MASKED)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1446) xive_finish_unmask(xive, sb, state, state->saved_scan_prio);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1447)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1448) /* Unlock */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1449) arch_spin_unlock(&sb->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1450) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1451)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1452) static void xive_pre_save_queue(struct kvmppc_xive *xive, struct xive_q *q)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1453) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1454) u32 idx = q->idx;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1455) u32 toggle = q->toggle;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1456) u32 irq;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1457)
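	/*
	 * Walk the queue using local copies of the index and toggle bit
	 * so the live queue isn't consumed, flagging the sources found
	 * in it as queued.
	 */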
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1458) do {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1459) irq = __xive_read_eq(q->qpage, q->msk, &idx, &toggle);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1460) if (irq > XICS_IPI)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1461) xive_pre_save_set_queued(xive, irq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1462) } while (irq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1463) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1464)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1465) static void xive_pre_save_scan(struct kvmppc_xive *xive)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1466) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1467) struct kvm_vcpu *vcpu = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1468) int i, j;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1469)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1470) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1471) * See comment in xive_get_source() about how this
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1472) * works. Collect a stable state for all interrupts.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1473) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1474) for (i = 0; i <= xive->max_sbid; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1475) struct kvmppc_xive_src_block *sb = xive->src_blocks[i];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1476) if (!sb)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1477) continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1478) for (j = 0; j < KVMPPC_XICS_IRQ_PER_ICS; j++)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1479) xive_pre_save_mask_irq(xive, sb, j);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1480) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1481)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1482) /* Then scan the queues and update the "in_queue" flag */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1483) kvm_for_each_vcpu(i, vcpu, xive->kvm) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1484) struct kvmppc_xive_vcpu *xc = vcpu->arch.xive_vcpu;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1485) if (!xc)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1486) continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1487) for (j = 0; j < KVMPPC_XIVE_Q_COUNT; j++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1488) if (xc->queues[j].qpage)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1489) xive_pre_save_queue(xive, &xc->queues[j]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1490) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1491) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1492)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1493) /* Finally restore interrupt states */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1494) for (i = 0; i <= xive->max_sbid; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1495) struct kvmppc_xive_src_block *sb = xive->src_blocks[i];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1496) if (!sb)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1497) continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1498) for (j = 0; j < KVMPPC_XICS_IRQ_PER_ICS; j++)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1499) xive_pre_save_unmask_irq(xive, sb, j);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1500) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1501) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1502)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1503) static void xive_post_save_scan(struct kvmppc_xive *xive)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1504) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1505) u32 i, j;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1506)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1507) /* Clear all the in_queue flags */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1508) for (i = 0; i <= xive->max_sbid; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1509) struct kvmppc_xive_src_block *sb = xive->src_blocks[i];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1510) if (!sb)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1511) continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1512) for (j = 0; j < KVMPPC_XICS_IRQ_PER_ICS; j++)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1513) sb->irq_state[j].in_queue = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1514) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1515)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1516) /* Next get_source() will do a new scan */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1517) xive->saved_src_count = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1518) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1519)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1520) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1521) * This returns the source configuration and state to user space.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1522) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1523) static int xive_get_source(struct kvmppc_xive *xive, long irq, u64 addr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1524) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1525) struct kvmppc_xive_src_block *sb;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1526) struct kvmppc_xive_irq_state *state;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1527) u64 __user *ubufp = (u64 __user *) addr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1528) u64 val, prio;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1529) u16 idx;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1530)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1531) sb = kvmppc_xive_find_source(xive, irq, &idx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1532) if (!sb)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1533) return -ENOENT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1534)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1535) state = &sb->irq_state[idx];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1536)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1537) if (!state->valid)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1538) return -ENOENT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1539)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1540) pr_devel("get_source(%ld)...\n", irq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1541)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1542) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1543) * So to properly save the state into something that looks like a
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1544) * XICS migration stream we cannot treat interrupts individually.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1545) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1546) * We need, instead, to mask them all (and save their previous PQ state)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1547) * to get a stable state in the HW, then sync them to ensure that
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1548) * any interrupt that had already fired hits its queue, and finally
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1549) * scan all the queues to collect which interrupts are still present
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1550) * in the queues, so we can set the "pending" flag on them and
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1551) * they can be resent on restore.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1552) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1553) * So we do it all when the "first" interrupt gets saved; all the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1554) * state is collected at that point, and the rest of xive_get_source()
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1555) * merely collects and converts that state to the expected
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1556) * userspace bit mask.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1557) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1558) if (xive->saved_src_count == 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1559) xive_pre_save_scan(xive);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1560) xive->saved_src_count++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1561)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1562) /* Convert saved state into something compatible with xics */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1563) val = state->act_server;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1564) prio = state->saved_scan_prio;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1565)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1566) if (prio == MASKED) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1567) val |= KVM_XICS_MASKED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1568) prio = state->saved_priority;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1569) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1570) val |= prio << KVM_XICS_PRIORITY_SHIFT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1571) if (state->lsi) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1572) val |= KVM_XICS_LEVEL_SENSITIVE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1573) if (state->saved_p)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1574) val |= KVM_XICS_PENDING;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1575) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1576) if (state->saved_p)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1577) val |= KVM_XICS_PRESENTED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1578)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1579) if (state->saved_q)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1580) val |= KVM_XICS_QUEUED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1581)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1582) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1583) * We mark it pending (which will attempt a re-delivery)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1584) * if we are in a queue *or* we were masked and had
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1585) * Q set, which is equivalent to the XICS "masked pending"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1586) * state.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1587) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1588) if (state->in_queue || (prio == MASKED && state->saved_q))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1589) val |= KVM_XICS_PENDING;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1590) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1591)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1592) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1593) * If that was the last interrupt saved, reset the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1594) * in_queue flags
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1595) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1596) if (xive->saved_src_count == xive->src_count)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1597) xive_post_save_scan(xive);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1598)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1599) /* Copy the result to userspace */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1600) if (put_user(val, ubufp))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1601) return -EFAULT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1602)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1603) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1604) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1605)
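/*
 * Allocate, initialize and publish the source block covering @irq.
 * If another caller raced us and already installed a block for this
 * id, the existing block is returned instead.
 */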
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1606) struct kvmppc_xive_src_block *kvmppc_xive_create_src_block(
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1607) struct kvmppc_xive *xive, int irq)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1608) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1609) struct kvmppc_xive_src_block *sb;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1610) int i, bid;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1611)
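/*
 * The block id is the upper part of the guest interrupt number;
 * each block covers KVMPPC_XICS_IRQ_PER_ICS sources.
 */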
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1612) bid = irq >> KVMPPC_XICS_ICS_SHIFT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1613)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1614) mutex_lock(&xive->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1615)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1616) /* block already exists - somebody else got here first */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1617) if (xive->src_blocks[bid])
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1618) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1619)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1620) /* Create the ICS */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1621) sb = kzalloc(sizeof(*sb), GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1622) if (!sb)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1623) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1624)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1625) sb->id = bid;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1626)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1627) for (i = 0; i < KVMPPC_XICS_IRQ_PER_ICS; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1628) sb->irq_state[i].number = (bid << KVMPPC_XICS_ICS_SHIFT) | i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1629) sb->irq_state[i].eisn = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1630) sb->irq_state[i].guest_priority = MASKED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1631) sb->irq_state[i].saved_priority = MASKED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1632) sb->irq_state[i].act_priority = MASKED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1633) }
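/*
 * Make sure the block is fully initialized before publishing the
 * pointer below; lockless readers of src_blocks[] depend on this
 * ordering.
 */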
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1634) smp_wmb();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1635) xive->src_blocks[bid] = sb;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1636)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1637) if (bid > xive->max_sbid)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1638) xive->max_sbid = bid;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1639)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1640) out:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1641) mutex_unlock(&xive->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1642) return xive->src_blocks[bid];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1643) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1644)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1645) static bool xive_check_delayed_irq(struct kvmppc_xive *xive, u32 irq)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1646) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1647) struct kvm *kvm = xive->kvm;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1648) struct kvm_vcpu *vcpu = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1649) int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1650)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1651) kvm_for_each_vcpu(i, vcpu, kvm) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1652) struct kvmppc_xive_vcpu *xc = vcpu->arch.xive_vcpu;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1653)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1654) if (!xc)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1655) continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1656)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1657) if (xc->delayed_irq == irq) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1658) xc->delayed_irq = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1659) xive->delayed_irqs--;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1660) return true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1661) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1662) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1663) return false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1664) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1665)
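/*
 * Restore one interrupt source from the 64-bit word passed by userspace
 * through KVM_DEV_XICS_GRP_SOURCES: the server sits in the low bits
 * (KVM_XICS_DESTINATION_MASK), the priority above KVM_XICS_PRIORITY_SHIFT,
 * and the KVM_XICS_* flags carry the masked/pending/presented/queued
 * state to reconstruct.
 */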
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1666) static int xive_set_source(struct kvmppc_xive *xive, long irq, u64 addr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1667) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1668) struct kvmppc_xive_src_block *sb;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1669) struct kvmppc_xive_irq_state *state;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1670) u64 __user *ubufp = (u64 __user *) addr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1671) u16 idx;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1672) u64 val;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1673) u8 act_prio, guest_prio;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1674) u32 server;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1675) int rc = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1676)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1677) if (irq < KVMPPC_XICS_FIRST_IRQ || irq >= KVMPPC_XICS_NR_IRQS)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1678) return -ENOENT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1679)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1680) pr_devel("set_source(irq=0x%lx)\n", irq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1681)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1682) /* Find the source */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1683) sb = kvmppc_xive_find_source(xive, irq, &idx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1684) if (!sb) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1685) pr_devel("No source, creating source block...\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1686) sb = kvmppc_xive_create_src_block(xive, irq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1687) if (!sb) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1688) pr_devel("Failed to create block...\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1689) return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1690) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1691) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1692) state = &sb->irq_state[idx];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1693)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1694) /* Read user passed data */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1695) if (get_user(val, ubufp)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1696) pr_devel("fault getting user info !\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1697) return -EFAULT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1698) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1699)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1700) server = val & KVM_XICS_DESTINATION_MASK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1701) guest_prio = val >> KVM_XICS_PRIORITY_SHIFT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1702)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1703) pr_devel(" val=0x%016llx (server=0x%x, guest_prio=%d)\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1704) val, server, guest_prio);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1705)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1706) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1707) * If the source doesn't already have an IPI, allocate
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1708) * one and get the corresponding data
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1709) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1710) if (!state->ipi_number) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1711) state->ipi_number = xive_native_alloc_irq();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1712) if (state->ipi_number == 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1713) pr_devel("Failed to allocate IPI !\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1714) return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1715) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1716) xive_native_populate_irq_data(state->ipi_number, &state->ipi_data);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1717) pr_devel(" src_ipi=0x%x\n", state->ipi_number);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1718) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1719)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1720) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1721) * We use lock_and_mask() to put the source in the right masked
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1722) * state. We will override that state from the saved state
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1723) * further down, but this will handle the cases of interrupts
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1724) * that need FW masking. We set the initial guest_priority to
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1725) * 0 before calling it to ensure it actually performs the masking.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1726) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1727) state->guest_priority = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1728) xive_lock_and_mask(xive, sb, state);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1729)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1730) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1731) * Now, we select a target if we have one. If we don't, we
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1732) * leave the interrupt untargeted. This means that an interrupt
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1733) * can become "untargeted" across migration if it was masked
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1734) * by set_xive(), but there is little we can do about it.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1735) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1736)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1737) /* First convert prio and mark interrupt as untargeted */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1738) act_prio = xive_prio_from_guest(guest_prio);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1739) state->act_priority = MASKED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1740)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1741) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1742) * We need to drop the lock due to the mutex below. Hopefully
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1743) * nothing is touching that interrupt since it hasn't been
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1744) * advertised to a running guest yet.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1745) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1746) arch_spin_unlock(&sb->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1747)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1748) /* If we have a priority, target the interrupt */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1749) if (act_prio != MASKED) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1750) /* First, check provisioning of queues */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1751) mutex_lock(&xive->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1752) rc = xive_check_provisioning(xive->kvm, act_prio);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1753) mutex_unlock(&xive->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1754)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1755) /* Target interrupt */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1756) if (rc == 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1757) rc = xive_target_interrupt(xive->kvm, state,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1758) server, act_prio);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1759) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1760) * If provisioning or targeting failed, leave it
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1761) * alone and masked. It will remain disabled until
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1762) * the guest re-targets it.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1763) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1764) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1765)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1766) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1767) * Find out if this was a delayed irq stashed in an ICP,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1768) * in which case, treat it as pending
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1769) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1770) if (xive->delayed_irqs && xive_check_delayed_irq(xive, irq)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1771) val |= KVM_XICS_PENDING;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1772) pr_devel(" Found delayed ! forcing PENDING !\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1773) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1774)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1775) /* Cleanup the SW state */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1776) state->old_p = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1777) state->old_q = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1778) state->lsi = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1779) state->asserted = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1780)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1781) /* Restore LSI state */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1782) if (val & KVM_XICS_LEVEL_SENSITIVE) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1783) state->lsi = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1784) if (val & KVM_XICS_PENDING)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1785) state->asserted = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1786) pr_devel(" LSI ! Asserted=%d\n", state->asserted);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1787) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1788)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1789) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1790) * Restore P and Q. If the interrupt was pending, we
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1791) * force Q and !P, which will trigger a resend.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1792) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1793) * That means that a guest that had both an interrupt
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1794) * pending (queued) and Q set will restore with only
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1795) * one instance of that interrupt instead of 2, but that
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1796) * is perfectly fine as coalescing interrupts that haven't
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1797) * been presented yet is always allowed.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1798) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1799) if (val & KVM_XICS_PRESENTED && !(val & KVM_XICS_PENDING))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1800) state->old_p = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1801) if (val & KVM_XICS_QUEUED || val & KVM_XICS_PENDING)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1802) state->old_q = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1803)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1804) pr_devel(" P=%d, Q=%d\n", state->old_p, state->old_q);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1805)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1806) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1807) * If the interrupt is masked, just save the priority for later.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1808) * Otherwise restore the guest priority, performing the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1809) * appropriate state transition and re-triggering if necessary.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1810) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1811) if (val & KVM_XICS_MASKED) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1812) pr_devel(" masked, saving prio\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1813) state->guest_priority = MASKED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1814) state->saved_priority = guest_prio;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1815) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1816) pr_devel(" unmasked, restoring to prio %d\n", guest_prio);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1817) xive_finish_unmask(xive, sb, state, guest_prio);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1818) state->saved_priority = guest_prio;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1819) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1820)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1821) /* Increment the number of valid sources and mark this one valid */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1822) if (!state->valid)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1823) xive->src_count++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1824) state->valid = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1825)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1826) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1827) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1828)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1829) int kvmppc_xive_set_irq(struct kvm *kvm, int irq_source_id, u32 irq, int level,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1830) bool line_status)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1831) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1832) struct kvmppc_xive *xive = kvm->arch.xive;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1833) struct kvmppc_xive_src_block *sb;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1834) struct kvmppc_xive_irq_state *state;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1835) u16 idx;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1836)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1837) if (!xive)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1838) return -ENODEV;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1839)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1840) sb = kvmppc_xive_find_source(xive, irq, &idx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1841) if (!sb)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1842) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1843)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1844) /* Perform locklessly .... (we need to do some RCUisms here...) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1845) state = &sb->irq_state[idx];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1846) if (!state->valid)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1847) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1848)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1849) /* We don't allow a trigger on a passed-through interrupt */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1850) if (state->pt_number)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1851) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1852)
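/*
 * For LSIs, track the line level: a de-assertion only clears the
 * latch and needs no trigger. Edge (MSI) triggers and LSI assertions
 * fall through to the IPI trigger below.
 */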
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1853) if ((level == 1 && state->lsi) || level == KVM_INTERRUPT_SET_LEVEL) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1854) state->asserted = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1855) } else if (level == 0 || level == KVM_INTERRUPT_UNSET) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1856) state->asserted = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1857) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1858) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1859)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1860) /* Trigger the IPI */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1861) xive_irq_trigger(&state->ipi_data);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1862)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1863) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1864) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1865)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1866) int kvmppc_xive_set_nr_servers(struct kvmppc_xive *xive, u64 addr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1867) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1868) u32 __user *ubufp = (u32 __user *) addr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1869) u32 nr_servers;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1870) int rc = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1871)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1872) if (get_user(nr_servers, ubufp))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1873) return -EFAULT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1874)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1875) pr_devel("%s nr_servers=%u\n", __func__, nr_servers);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1876)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1877) if (!nr_servers || nr_servers > KVM_MAX_VCPU_ID)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1878) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1879)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1880) mutex_lock(&xive->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1881) if (xive->vp_base != XIVE_INVALID_VP)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1882) /* The VP block is allocated once and freed when the device
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1883) * is released. Better not to allow its size to change since it is
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1884) * used by connect_vcpu to validate vCPU ids (eg,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1885) * setting it back to a higher value could allow connect_vcpu
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1886) * to come up with a VP id that goes beyond the VP block, which
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1887) * is likely to cause a crash in OPAL).
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1888) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1889) rc = -EBUSY;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1890) else if (nr_servers > KVM_MAX_VCPUS)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1891) /* We don't need more servers. Higher vCPU ids get packed
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1892) * down below KVM_MAX_VCPUS by kvmppc_pack_vcpu_id().
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1893) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1894) xive->nr_servers = KVM_MAX_VCPUS;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1895) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1896) xive->nr_servers = nr_servers;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1897)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1898) mutex_unlock(&xive->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1899)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1900) return rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1901) }
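
/*
 * Illustrative sketch (assumed userspace side, not part of this file):
 * the number of servers is set through the generic KVM device
 * attribute ioctl, roughly as follows.
 *
 *	u32 nr_servers = 8;
 *	struct kvm_device_attr attr = {
 *		.group = KVM_DEV_XICS_GRP_CTRL,
 *		.attr  = KVM_DEV_XICS_NR_SERVERS,
 *		.addr  = (u64)(unsigned long)&nr_servers,
 *	};
 *	ioctl(xics_device_fd, KVM_SET_DEVICE_ATTR, &attr);
 */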
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1902)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1903) static int xive_set_attr(struct kvm_device *dev, struct kvm_device_attr *attr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1904) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1905) struct kvmppc_xive *xive = dev->private;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1906)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1907) /* We honor the existing XICS ioctl */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1908) switch (attr->group) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1909) case KVM_DEV_XICS_GRP_SOURCES:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1910) return xive_set_source(xive, attr->attr, attr->addr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1911) case KVM_DEV_XICS_GRP_CTRL:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1912) switch (attr->attr) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1913) case KVM_DEV_XICS_NR_SERVERS:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1914) return kvmppc_xive_set_nr_servers(xive, attr->addr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1915) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1916) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1917) return -ENXIO;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1918) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1919)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1920) static int xive_get_attr(struct kvm_device *dev, struct kvm_device_attr *attr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1921) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1922) struct kvmppc_xive *xive = dev->private;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1923)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1924) /* We honor the existing XICS ioctl */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1925) switch (attr->group) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1926) case KVM_DEV_XICS_GRP_SOURCES:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1927) return xive_get_source(xive, attr->attr, attr->addr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1928) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1929) return -ENXIO;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1930) }
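
/*
 * Illustrative sketch (assumed userspace side): one source is saved by
 * reading its 64-bit state word with KVM_GET_DEVICE_ATTR, mirroring
 * the set path handled above.
 *
 *	u64 state;
 *	struct kvm_device_attr attr = {
 *		.group = KVM_DEV_XICS_GRP_SOURCES,
 *		.attr  = irq_number,
 *		.addr  = (u64)(unsigned long)&state,
 *	};
 *	ioctl(xics_device_fd, KVM_GET_DEVICE_ATTR, &attr);
 */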
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1931)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1932) static int xive_has_attr(struct kvm_device *dev, struct kvm_device_attr *attr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1933) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1934) /* We honor the same limits as XICS, at least for now */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1935) switch (attr->group) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1936) case KVM_DEV_XICS_GRP_SOURCES:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1937) if (attr->attr >= KVMPPC_XICS_FIRST_IRQ &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1938) attr->attr < KVMPPC_XICS_NR_IRQS)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1939) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1940) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1941) case KVM_DEV_XICS_GRP_CTRL:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1942) switch (attr->attr) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1943) case KVM_DEV_XICS_NR_SERVERS:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1944) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1945) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1946) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1947) return -ENXIO;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1948) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1949)
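/*
 * Quiesce a source: mask it at the ESB level (PQ set to 01) and reset
 * its routing to an unconfigured, masked target.
 */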
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1950) static void kvmppc_xive_cleanup_irq(u32 hw_num, struct xive_irq_data *xd)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1951) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1952) xive_vm_esb_load(xd, XIVE_ESB_SET_PQ_01);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1953) xive_native_configure_irq(hw_num, 0, MASKED, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1954) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1955)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1956) void kvmppc_xive_free_sources(struct kvmppc_xive_src_block *sb)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1957) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1958) int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1959)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1960) for (i = 0; i < KVMPPC_XICS_IRQ_PER_ICS; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1961) struct kvmppc_xive_irq_state *state = &sb->irq_state[i];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1962)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1963) if (!state->valid)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1964) continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1965)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1966) kvmppc_xive_cleanup_irq(state->ipi_number, &state->ipi_data);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1967) xive_cleanup_irq_data(&state->ipi_data);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1968) xive_native_free_irq(state->ipi_number);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1969)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1970) /* Pass-through: clean up too, but keep the IRQ hw data */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1971) if (state->pt_number)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1972) kvmppc_xive_cleanup_irq(state->pt_number, state->pt_data);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1973)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1974) state->valid = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1975) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1976) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1977)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1978) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1979) * Called when device fd is closed. kvm->lock is held.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1980) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1981) static void kvmppc_xive_release(struct kvm_device *dev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1982) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1983) struct kvmppc_xive *xive = dev->private;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1984) struct kvm *kvm = xive->kvm;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1985) struct kvm_vcpu *vcpu;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1986) int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1987)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1988) pr_devel("Releasing xive device\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1989)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1990) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1991) * Since this is the device release function, we know that
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1992) * userspace does not have any open fd referring to the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1993) * device. Therefore none of the device attribute set/get
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1994) * functions can be executing concurrently, and similarly,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1995) * the connect_vcpu and set/clr_mapped functions cannot be
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1996) * executing either.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1997) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1998)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1999) debugfs_remove(xive->dentry);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2000)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2001) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2002) * We should clean up the vCPU interrupt presenters first.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2003) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2004) kvm_for_each_vcpu(i, vcpu, kvm) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2005) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2006) * Take vcpu->mutex to ensure that no one_reg get/set ioctl
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2007) * (i.e. kvmppc_xive_[gs]et_icp) can be done concurrently.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2008) * Holding the vcpu->mutex also means that the vcpu cannot
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2009) * be executing the KVM_RUN ioctl, and therefore it cannot
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2010) * be executing the XIVE push or pull code or accessing
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2011) * the XIVE MMIO regions.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2012) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2013) mutex_lock(&vcpu->mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2014) kvmppc_xive_cleanup_vcpu(vcpu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2015) mutex_unlock(&vcpu->mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2016) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2017)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2018) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2019) * Now that we have cleared vcpu->arch.xive_vcpu, vcpu->arch.irq_type
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2020) * and vcpu->arch.xive_esc_[vr]addr on each vcpu, we are safe
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2021) * against xive code getting called during vcpu execution or
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2022) * set/get one_reg operations.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2023) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2024) kvm->arch.xive = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2025)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2026) /* Mask and free interrupts */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2027) for (i = 0; i <= xive->max_sbid; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2028) if (xive->src_blocks[i])
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2029) kvmppc_xive_free_sources(xive->src_blocks[i]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2030) kfree(xive->src_blocks[i]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2031) xive->src_blocks[i] = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2032) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2033)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2034) if (xive->vp_base != XIVE_INVALID_VP)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2035) xive_native_free_vp_block(xive->vp_base);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2036)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2037) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2038) * A reference to the kvmppc_xive pointer is now kept under
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2039) * the xive_devices struct of the machine for reuse. For now it is
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2040) * freed when the VM is destroyed, until we fix all the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2041) * execution paths.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2042) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2043)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2044) kfree(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2045) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2046)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2047) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2048) * When the guest chooses the interrupt mode (XICS legacy or XIVE
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2049) * native), the VM will switch KVM devices. The previous device will
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2050) * be "released" before the new one is created.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2051) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2052) * Until we are sure all execution paths are well protected, provide a
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2053) * fail-safe (transitional) method for device destruction, in which
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2054) * the XIVE device pointer is recycled and not directly freed.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2055) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2056) struct kvmppc_xive *kvmppc_xive_get_device(struct kvm *kvm, u32 type)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2057) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2058) struct kvmppc_xive **kvm_xive_device = type == KVM_DEV_TYPE_XIVE ?
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2059) &kvm->arch.xive_devices.native :
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2060) &kvm->arch.xive_devices.xics_on_xive;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2061) struct kvmppc_xive *xive = *kvm_xive_device;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2062)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2063) if (!xive) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2064) xive = kzalloc(sizeof(*xive), GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2065) *kvm_xive_device = xive;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2066) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2067) memset(xive, 0, sizeof(*xive));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2068) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2069)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2070) return xive;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2071) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2072)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2073) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2074) * Create a XICS device with XIVE backend. kvm->lock is held.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2075) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2076) static int kvmppc_xive_create(struct kvm_device *dev, u32 type)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2077) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2078) struct kvmppc_xive *xive;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2079) struct kvm *kvm = dev->kvm;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2080)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2081) pr_devel("Creating xive for partition\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2082)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2083) /* Already there ? */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2084) if (kvm->arch.xive)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2085) return -EEXIST;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2086)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2087) xive = kvmppc_xive_get_device(kvm, type);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2088) if (!xive)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2089) return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2090)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2091) dev->private = xive;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2092) xive->dev = dev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2093) xive->kvm = kvm;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2094) mutex_init(&xive->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2095)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2096) /* We use the default queue size set by the host */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2097) xive->q_order = xive_native_default_eq_shift();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2098) if (xive->q_order < PAGE_SHIFT)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2099) xive->q_page_order = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2100) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2101) xive->q_page_order = xive->q_order - PAGE_SHIFT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2102)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2103) /* VP allocation is delayed to the first call to connect_vcpu */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2104) xive->vp_base = XIVE_INVALID_VP;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2105) /* KVM_MAX_VCPUS limits the number of VMs to roughly 64 per socket
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2106) * on a POWER9 system.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2107) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2108) xive->nr_servers = KVM_MAX_VCPUS;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2109)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2110) xive->single_escalation = xive_native_has_single_escalation();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2111)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2112) kvm->arch.xive = xive;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2113) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2114) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2115)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2116) int kvmppc_xive_debug_show_queues(struct seq_file *m, struct kvm_vcpu *vcpu)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2117) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2118) struct kvmppc_xive_vcpu *xc = vcpu->arch.xive_vcpu;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2119) unsigned int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2120)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2121) for (i = 0; i < KVMPPC_XIVE_Q_COUNT; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2122) struct xive_q *q = &xc->queues[i];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2123) u32 i0, i1, idx;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2124)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2125) if (!q->qpage && !xc->esc_virq[i])
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2126) continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2127)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2128) seq_printf(m, " [q%d]: ", i);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2129)
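/* Peek at the next two event queue entries at the current index */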
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2130) if (q->qpage) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2131) idx = q->idx;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2132) i0 = be32_to_cpup(q->qpage + idx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2133) idx = (idx + 1) & q->msk;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2134) i1 = be32_to_cpup(q->qpage + idx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2135) seq_printf(m, "T=%d %08x %08x...\n", q->toggle,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2136) i0, i1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2137) }
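/* If an escalation interrupt is set up, show its ESB PQ state */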
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2138) if (xc->esc_virq[i]) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2139) struct irq_data *d = irq_get_irq_data(xc->esc_virq[i]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2140) struct xive_irq_data *xd =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2141) irq_data_get_irq_handler_data(d);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2142) u64 pq = xive_vm_esb_load(xd, XIVE_ESB_GET);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2143)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2144) seq_printf(m, "E:%c%c I(%d:%llx:%llx)",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2145) (pq & XIVE_ESB_VAL_P) ? 'P' : 'p',
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2146) (pq & XIVE_ESB_VAL_Q) ? 'Q' : 'q',
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2147) xc->esc_virq[i], pq, xd->eoi_page);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2148) seq_puts(m, "\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2149) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2150) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2151) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2152) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2153)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2154) static int xive_debug_show(struct seq_file *m, void *private)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2155) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2156) struct kvmppc_xive *xive = m->private;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2157) struct kvm *kvm = xive->kvm;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2158) struct kvm_vcpu *vcpu;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2159) u64 t_rm_h_xirr = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2160) u64 t_rm_h_ipoll = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2161) u64 t_rm_h_cppr = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2162) u64 t_rm_h_eoi = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2163) u64 t_rm_h_ipi = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2164) u64 t_vm_h_xirr = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2165) u64 t_vm_h_ipoll = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2166) u64 t_vm_h_cppr = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2167) u64 t_vm_h_eoi = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2168) u64 t_vm_h_ipi = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2169) unsigned int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2170)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2171) if (!kvm)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2172) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2173)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2174) seq_puts(m, "=========\nVCPU state\n=========\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2175)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2176) kvm_for_each_vcpu(i, vcpu, kvm) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2177) struct kvmppc_xive_vcpu *xc = vcpu->arch.xive_vcpu;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2178)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2179) if (!xc)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2180) continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2181)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2182) seq_printf(m, "cpu server %#x VP:%#x CPPR:%#x HWCPPR:%#x"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2183) " MFRR:%#x PEND:%#x h_xirr: R=%lld V=%lld\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2184) xc->server_num, xc->vp_id, xc->cppr, xc->hw_cppr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2185) xc->mfrr, xc->pending,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2186) xc->stat_rm_h_xirr, xc->stat_vm_h_xirr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2187)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2188) kvmppc_xive_debug_show_queues(m, vcpu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2189)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2190) t_rm_h_xirr += xc->stat_rm_h_xirr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2191) t_rm_h_ipoll += xc->stat_rm_h_ipoll;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2192) t_rm_h_cppr += xc->stat_rm_h_cppr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2193) t_rm_h_eoi += xc->stat_rm_h_eoi;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2194) t_rm_h_ipi += xc->stat_rm_h_ipi;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2195) t_vm_h_xirr += xc->stat_vm_h_xirr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2196) t_vm_h_ipoll += xc->stat_vm_h_ipoll;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2197) t_vm_h_cppr += xc->stat_vm_h_cppr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2198) t_vm_h_eoi += xc->stat_vm_h_eoi;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2199) t_vm_h_ipi += xc->stat_vm_h_ipi;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2200) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2201)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2202) seq_puts(m, "Hcalls totals\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2203) seq_printf(m, " H_XIRR R=%10lld V=%10lld\n", t_rm_h_xirr, t_vm_h_xirr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2204) seq_printf(m, " H_IPOLL R=%10lld V=%10lld\n", t_rm_h_ipoll, t_vm_h_ipoll);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2205) seq_printf(m, " H_CPPR R=%10lld V=%10lld\n", t_rm_h_cppr, t_vm_h_cppr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2206) seq_printf(m, " H_EOI R=%10lld V=%10lld\n", t_rm_h_eoi, t_vm_h_eoi);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2207) seq_printf(m, " H_IPI R=%10lld V=%10lld\n", t_rm_h_ipi, t_vm_h_ipi);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2208)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2209) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2210) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2211)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2212) DEFINE_SHOW_ATTRIBUTE(xive_debug);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2213)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2214) static void xive_debugfs_init(struct kvmppc_xive *xive)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2215) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2216) char *name;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2217)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2218) name = kasprintf(GFP_KERNEL, "kvm-xive-%p", xive);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2219) if (!name) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2220) pr_err("%s: no memory for name\n", __func__);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2221) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2222) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2223)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2224) xive->dentry = debugfs_create_file(name, S_IRUGO, powerpc_debugfs_root,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2225) xive, &xive_debug_fops);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2226)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2227) pr_debug("%s: created %s\n", __func__, name);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2228) kfree(name);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2229) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2230)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2231) static void kvmppc_xive_init(struct kvm_device *dev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2232) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2233) struct kvmppc_xive *xive = dev->private;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2234)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2235) /* Register some debug interfaces */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2236) xive_debugfs_init(xive);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2237) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2238)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2239) struct kvm_device_ops kvm_xive_ops = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2240) .name = "kvm-xive",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2241) .create = kvmppc_xive_create,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2242) .init = kvmppc_xive_init,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2243) .release = kvmppc_xive_release,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2244) .set_attr = xive_set_attr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2245) .get_attr = xive_get_attr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2246) .has_attr = xive_has_attr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2247) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2248)
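/*
 * The __xive_vm_* pointers are the virtual-mode fallbacks used by the
 * HV hcall dispatch; wiring them up here lets XICS hcalls from a
 * XICS-on-XIVE guest be handled by the xive_vm_* routines when they
 * cannot be completed in real mode.
 */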
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2249) void kvmppc_xive_init_module(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2250) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2251) __xive_vm_h_xirr = xive_vm_h_xirr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2252) __xive_vm_h_ipoll = xive_vm_h_ipoll;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2253) __xive_vm_h_ipi = xive_vm_h_ipi;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2254) __xive_vm_h_cppr = xive_vm_h_cppr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2255) __xive_vm_h_eoi = xive_vm_h_eoi;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2256) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2257)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2258) void kvmppc_xive_exit_module(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2259) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2260) __xive_vm_h_xirr = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2261) __xive_vm_h_ipoll = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2262) __xive_vm_h_ipi = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2263) __xive_vm_h_cppr = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2264) __xive_vm_h_eoi = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2265) }