// SPDX-License-Identifier: GPL-2.0-only
/*
 * GICv3 ITS emulation
 *
 * Copyright (C) 2015,2016 ARM Ltd.
 * Author: Andre Przywara <andre.przywara@arm.com>
 */

#include <linux/cpu.h>
#include <linux/kvm.h>
#include <linux/kvm_host.h>
#include <linux/interrupt.h>
#include <linux/list.h>
#include <linux/uaccess.h>
#include <linux/list_sort.h>

#include <linux/irqchip/arm-gic-v3.h>

#include <asm/kvm_emulate.h>
#include <asm/kvm_arm.h>
#include <asm/kvm_mmu.h>

#include "vgic.h"
#include "vgic-mmio.h"

static int vgic_its_save_tables_v0(struct vgic_its *its);
static int vgic_its_restore_tables_v0(struct vgic_its *its);
static int vgic_its_commit_v0(struct vgic_its *its);
static int update_lpi_config(struct kvm *kvm, struct vgic_irq *irq,
			     struct kvm_vcpu *filter_vcpu, bool needs_inv);

/*
 * Creates a new (reference to a) struct vgic_irq for a given LPI.
 * If this LPI is already mapped on another ITS, we increase its refcount
 * and return a pointer to the existing structure.
 * If this is a "new" LPI, we allocate and initialize a new struct vgic_irq.
 * This function returns a pointer to the _unlocked_ structure.
 */
static struct vgic_irq *vgic_add_lpi(struct kvm *kvm, u32 intid,
				     struct kvm_vcpu *vcpu)
{
	struct vgic_dist *dist = &kvm->arch.vgic;
	struct vgic_irq *irq = vgic_get_irq(kvm, NULL, intid), *oldirq;
	unsigned long flags;
	int ret;

	/* In this case there is no put, since we keep the reference. */
	if (irq)
		return irq;

	irq = kzalloc(sizeof(struct vgic_irq), GFP_KERNEL);
	if (!irq)
		return ERR_PTR(-ENOMEM);

	INIT_LIST_HEAD(&irq->lpi_list);
	INIT_LIST_HEAD(&irq->ap_list);
	raw_spin_lock_init(&irq->irq_lock);

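	/* LPIs are edge-triggered by the architecture and always Group 1. */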
	irq->config = VGIC_CONFIG_EDGE;
	kref_init(&irq->refcount);
	irq->intid = intid;
	irq->target_vcpu = vcpu;
	irq->group = 1;

	raw_spin_lock_irqsave(&dist->lpi_list_lock, flags);

	/*
	 * There could be a race with another vgic_add_lpi(), so we need to
	 * check that we don't add a second list entry with the same LPI.
	 */
	list_for_each_entry(oldirq, &dist->lpi_list_head, lpi_list) {
		if (oldirq->intid != intid)
			continue;

		/* Someone was faster with adding this LPI, let's use that. */
		kfree(irq);
		irq = oldirq;

		/*
		 * This increases the refcount, the caller is expected to
		 * call vgic_put_irq() on the returned pointer once it's
		 * finished with the IRQ.
		 */
		vgic_get_irq_kref(irq);

		goto out_unlock;
	}

	list_add_tail(&irq->lpi_list, &dist->lpi_list_head);
	dist->lpi_list_count++;

out_unlock:
	raw_spin_unlock_irqrestore(&dist->lpi_list_lock, flags);

	/*
	 * We "cache" the configuration table entries in our struct vgic_irq's.
	 * However we only have those structs for mapped IRQs, so we read in
	 * the respective config data from memory here upon mapping the LPI.
	 *
	 * Should any of these fail, behave as if we couldn't create the LPI
	 * by dropping the refcount and returning the error.
	 */
	ret = update_lpi_config(kvm, irq, NULL, false);
	if (ret) {
		vgic_put_irq(kvm, irq);
		return ERR_PTR(ret);
	}

	ret = vgic_v3_lpi_sync_pending_status(kvm, irq);
	if (ret) {
		vgic_put_irq(kvm, irq);
		return ERR_PTR(ret);
	}

	return irq;
}

struct its_device {
	struct list_head dev_list;

	/* the head for the list of ITTEs */
	struct list_head itt_head;
	u32 num_eventid_bits;
	gpa_t itt_addr;
	u32 device_id;
};

#define COLLECTION_NOT_MAPPED ((u32)~0)

struct its_collection {
	struct list_head coll_list;

	u32 collection_id;
	u32 target_addr;
};

#define its_is_collection_mapped(coll) ((coll) && \
	((coll)->target_addr != COLLECTION_NOT_MAPPED))

struct its_ite {
	struct list_head ite_list;

	struct vgic_irq *irq;
	struct its_collection *collection;
	u32 event_id;
};

struct vgic_translation_cache_entry {
	struct list_head entry;
	phys_addr_t db;
	u32 devid;
	u32 eventid;
	struct vgic_irq *irq;
};

/**
 * struct vgic_its_abi - ITS abi ops and settings
 * @cte_esz: collection table entry size
 * @dte_esz: device table entry size
 * @ite_esz: interrupt translation table entry size
 * @save_tables: save the ITS tables into guest RAM
 * @restore_tables: restore the ITS internal structs from tables
 *  stored in guest RAM
 * @commit: initialize the registers which expose the ABI settings,
 *  especially the entry sizes
 */
struct vgic_its_abi {
	int cte_esz;
	int dte_esz;
	int ite_esz;
	int (*save_tables)(struct vgic_its *its);
	int (*restore_tables)(struct vgic_its *its);
	int (*commit)(struct vgic_its *its);
};

#define ABI_0_ESZ	8
#define ESZ_MAX		ABI_0_ESZ

static const struct vgic_its_abi its_table_abi_versions[] = {
	[0] = {
	 .cte_esz = ABI_0_ESZ,
	 .dte_esz = ABI_0_ESZ,
	 .ite_esz = ABI_0_ESZ,
	 .save_tables = vgic_its_save_tables_v0,
	 .restore_tables = vgic_its_restore_tables_v0,
	 .commit = vgic_its_commit_v0,
	},
};

#define NR_ITS_ABIS	ARRAY_SIZE(its_table_abi_versions)

inline const struct vgic_its_abi *vgic_its_get_abi(struct vgic_its *its)
{
	return &its_table_abi_versions[its->abi_rev];
}

static int vgic_its_set_abi(struct vgic_its *its, u32 rev)
{
	const struct vgic_its_abi *abi;

	its->abi_rev = rev;
	abi = vgic_its_get_abi(its);
	return abi->commit(its);
}

/*
 * Finds and returns a device in the device table for an ITS.
 * Must be called with the its_lock mutex held.
 */
static struct its_device *find_its_device(struct vgic_its *its, u32 device_id)
{
	struct its_device *device;

	list_for_each_entry(device, &its->device_list, dev_list)
		if (device_id == device->device_id)
			return device;

	return NULL;
}

/*
 * Finds and returns an interrupt translation table entry (ITTE) for a given
 * Device ID/Event ID pair on an ITS.
 * Must be called with the its_lock mutex held.
 */
static struct its_ite *find_ite(struct vgic_its *its, u32 device_id,
				u32 event_id)
{
	struct its_device *device;
	struct its_ite *ite;

	device = find_its_device(its, device_id);
	if (device == NULL)
		return NULL;

	list_for_each_entry(ite, &device->itt_head, ite_list)
		if (ite->event_id == event_id)
			return ite;

	return NULL;
}

/* To be used as an iterator, this macro omits the enclosing parentheses. */
#define for_each_lpi_its(dev, ite, its) \
	list_for_each_entry(dev, &(its)->device_list, dev_list) \
		list_for_each_entry(ite, &(dev)->itt_head, ite_list)
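
/*
 * Typical usage (with the its_lock mutex held), walking every ITE of
 * every device mapped on this ITS:
 *
 *	for_each_lpi_its(device, ite, its)
 *		update_affinity_ite(kvm, ite);
 */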

#define GIC_LPI_OFFSET 8192

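/*
 * The emulated ITS advertises 16 DeviceID bits and 16 EventID bits,
 * i.e. up to 65536 devices and up to 65536 events per device.
 */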
#define VITS_TYPER_IDBITS 16
#define VITS_TYPER_DEVBITS 16
#define VITS_DTE_MAX_DEVID_OFFSET	(BIT(14) - 1)
#define VITS_ITE_MAX_EVENTID_OFFSET	(BIT(16) - 1)

/*
 * Finds and returns a collection in the ITS collection table.
 * Must be called with the its_lock mutex held.
 */
static struct its_collection *find_collection(struct vgic_its *its, int coll_id)
{
	struct its_collection *collection;

	list_for_each_entry(collection, &its->collection_list, coll_list) {
		if (coll_id == collection->collection_id)
			return collection;
	}

	return NULL;
}

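/*
 * Layout of an LPI property byte in the guest's configuration table:
 * bits [7:2] hold the priority, bit 0 is the enable bit.
 */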
#define LPI_PROP_ENABLE_BIT(p)	((p) & LPI_PROP_ENABLED)
#define LPI_PROP_PRIORITY(p)	((p) & 0xfc)

/*
 * Reads the configuration data for a given LPI from guest memory and
 * updates the fields in struct vgic_irq.
 * If filter_vcpu is not NULL, applies only if the IRQ is targeting this
 * VCPU. Unconditionally applies if filter_vcpu is NULL.
 */
static int update_lpi_config(struct kvm *kvm, struct vgic_irq *irq,
			     struct kvm_vcpu *filter_vcpu, bool needs_inv)
{
	u64 propbase = GICR_PROPBASER_ADDRESS(kvm->arch.vgic.propbaser);
	u8 prop;
	int ret;
	unsigned long flags;

	ret = kvm_read_guest_lock(kvm, propbase + irq->intid - GIC_LPI_OFFSET,
				  &prop, 1);

	if (ret)
		return ret;

	raw_spin_lock_irqsave(&irq->irq_lock, flags);

	if (!filter_vcpu || filter_vcpu == irq->target_vcpu) {
		irq->priority = LPI_PROP_PRIORITY(prop);
		irq->enabled = LPI_PROP_ENABLE_BIT(prop);

		if (!irq->hw) {
			vgic_queue_irq_unlock(kvm, irq, flags);
			return 0;
		}
	}

	raw_spin_unlock_irqrestore(&irq->irq_lock, flags);

	if (irq->hw)
		return its_prop_update_vlpi(irq->host_irq, prop, needs_inv);

	return 0;
}

/*
 * Create a snapshot of the current LPIs targeting @vcpu, so that we can
 * enumerate those LPIs without holding any lock.
 * Returns their number and puts the kmalloc'ed array into intid_ptr.
 */
int vgic_copy_lpi_list(struct kvm *kvm, struct kvm_vcpu *vcpu, u32 **intid_ptr)
{
	struct vgic_dist *dist = &kvm->arch.vgic;
	struct vgic_irq *irq;
	unsigned long flags;
	u32 *intids;
	int irq_count, i = 0;

	/*
	 * There is an obvious race between allocating the array and LPIs
	 * being mapped/unmapped. If we ended up here as a result of a
	 * command, we're safe (locks are held, preventing another
	 * command). If coming from another path (such as enabling LPIs),
	 * we must be careful not to overrun the array.
	 */
	irq_count = READ_ONCE(dist->lpi_list_count);
	intids = kmalloc_array(irq_count, sizeof(intids[0]), GFP_KERNEL);
	if (!intids)
		return -ENOMEM;

	raw_spin_lock_irqsave(&dist->lpi_list_lock, flags);
	list_for_each_entry(irq, &dist->lpi_list_head, lpi_list) {
		if (i == irq_count)
			break;
		/* We don't need to "get" the IRQ, as we hold the list lock. */
		if (vcpu && irq->target_vcpu != vcpu)
			continue;
		intids[i++] = irq->intid;
	}
	raw_spin_unlock_irqrestore(&dist->lpi_list_lock, flags);

	*intid_ptr = intids;
	return i;
}

static int update_affinity(struct vgic_irq *irq, struct kvm_vcpu *vcpu)
{
	int ret = 0;
	unsigned long flags;

	raw_spin_lock_irqsave(&irq->irq_lock, flags);
	irq->target_vcpu = vcpu;
	raw_spin_unlock_irqrestore(&irq->irq_lock, flags);

	if (irq->hw) {
		struct its_vlpi_map map;

		ret = its_get_vlpi(irq->host_irq, &map);
		if (ret)
			return ret;

		if (map.vpe)
			atomic_dec(&map.vpe->vlpi_count);
		map.vpe = &vcpu->arch.vgic_cpu.vgic_v3.its_vpe;
		atomic_inc(&map.vpe->vlpi_count);

		ret = its_map_vlpi(irq->host_irq, &map);
	}

	return ret;
}

/*
 * Promotes the ITS view of affinity of an ITTE (which redistributor this LPI
 * is targeting) to the VGIC's view, which deals with target VCPUs.
 * Needs to be called whenever either the collection an LPI belongs to
 * has changed or the collection itself got retargeted.
 */
static void update_affinity_ite(struct kvm *kvm, struct its_ite *ite)
{
	struct kvm_vcpu *vcpu;

	if (!its_is_collection_mapped(ite->collection))
		return;

	vcpu = kvm_get_vcpu(kvm, ite->collection->target_addr);
	update_affinity(ite->irq, vcpu);
}

/*
 * Updates the target VCPU for every LPI targeting this collection.
 * Must be called with the its_lock mutex held.
 */
static void update_affinity_collection(struct kvm *kvm, struct vgic_its *its,
				       struct its_collection *coll)
{
	struct its_device *device;
	struct its_ite *ite;

	for_each_lpi_its(device, ite, its) {
		if (!ite->collection || coll != ite->collection)
			continue;

		update_affinity_ite(kvm, ite);
	}
}

static u32 max_lpis_propbaser(u64 propbaser)
{
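	/*
	 * GICR_PROPBASER.IDbits lives in bits [4:0] and encodes the number
	 * of interrupt ID bits minus one, so 2^(IDbits + 1) interrupt IDs
	 * are supported, capped at what the ITS emulation can handle.
	 */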
	int nr_idbits = (propbaser & 0x1f) + 1;

	return 1U << min(nr_idbits, INTERRUPT_ID_BITS_ITS);
}

/*
 * Sync the pending table pending bit of LPIs targeting @vcpu
 * with our own data structures. This relies on the LPI being
 * mapped before.
 */
static int its_sync_lpi_pending_table(struct kvm_vcpu *vcpu)
{
	gpa_t pendbase = GICR_PENDBASER_ADDRESS(vcpu->arch.vgic_cpu.pendbaser);
	struct vgic_irq *irq;
	int last_byte_offset = -1;
	int ret = 0;
	u32 *intids;
	int nr_irqs, i;
	unsigned long flags;
	u8 pendmask;

	nr_irqs = vgic_copy_lpi_list(vcpu->kvm, vcpu, &intids);
	if (nr_irqs < 0)
		return nr_irqs;

	for (i = 0; i < nr_irqs; i++) {
		int byte_offset, bit_nr;

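		/*
		 * Each INTID owns one bit in the pending table; e.g. INTID
		 * 8192 (the first LPI) maps to bit 0 of byte 1024.
		 */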
		byte_offset = intids[i] / BITS_PER_BYTE;
		bit_nr = intids[i] % BITS_PER_BYTE;

		/*
		 * For contiguously allocated LPIs chances are we just read
		 * this very same byte in the last iteration. Reuse that.
		 */
		if (byte_offset != last_byte_offset) {
			ret = kvm_read_guest_lock(vcpu->kvm,
						  pendbase + byte_offset,
						  &pendmask, 1);
			if (ret) {
				kfree(intids);
				return ret;
			}
			last_byte_offset = byte_offset;
		}

		irq = vgic_get_irq(vcpu->kvm, NULL, intids[i]);
		raw_spin_lock_irqsave(&irq->irq_lock, flags);
		irq->pending_latch = pendmask & (1U << bit_nr);
		vgic_queue_irq_unlock(vcpu->kvm, irq, flags);
		vgic_put_irq(vcpu->kvm, irq);
	}

	kfree(intids);

	return ret;
}

static unsigned long vgic_mmio_read_its_typer(struct kvm *kvm,
					      struct vgic_its *its,
					      gpa_t addr, unsigned int len)
{
	const struct vgic_its_abi *abi = vgic_its_get_abi(its);
	u64 reg = GITS_TYPER_PLPIS;

	/*
	 * We use linear CPU numbers for redistributor addressing,
	 * so GITS_TYPER.PTA is 0.
	 * Also we force all PROPBASER registers to be the same, so
	 * CommonLPIAff is 0 as well.
	 * To avoid memory waste in the guest, we keep the number of IDBits and
	 * DevBits low - at least for the time being.
	 */
	reg |= GIC_ENCODE_SZ(VITS_TYPER_DEVBITS, 5) << GITS_TYPER_DEVBITS_SHIFT;
	reg |= GIC_ENCODE_SZ(VITS_TYPER_IDBITS, 5) << GITS_TYPER_IDBITS_SHIFT;
	reg |= GIC_ENCODE_SZ(abi->ite_esz, 4) << GITS_TYPER_ITT_ENTRY_SIZE_SHIFT;

	return extract_bytes(reg, addr & 7, len);
}

static unsigned long vgic_mmio_read_its_iidr(struct kvm *kvm,
					     struct vgic_its *its,
					     gpa_t addr, unsigned int len)
{
	u32 val;

	val = (its->abi_rev << GITS_IIDR_REV_SHIFT) & GITS_IIDR_REV_MASK;
	val |= (PRODUCT_ID_KVM << GITS_IIDR_PRODUCTID_SHIFT) | IMPLEMENTER_ARM;
	return val;
}

static int vgic_mmio_uaccess_write_its_iidr(struct kvm *kvm,
					    struct vgic_its *its,
					    gpa_t addr, unsigned int len,
					    unsigned long val)
{
	u32 rev = GITS_IIDR_REV(val);

	if (rev >= NR_ITS_ABIS)
		return -EINVAL;
	return vgic_its_set_abi(its, rev);
}

static unsigned long vgic_mmio_read_its_idregs(struct kvm *kvm,
					       struct vgic_its *its,
					       gpa_t addr, unsigned int len)
{
	switch (addr & 0xffff) {
	case GITS_PIDR0:
		return 0x92;	/* part number, bits[7:0] */
	case GITS_PIDR1:
		return 0xb4;	/* part number, bits[11:8] */
	case GITS_PIDR2:
		return GIC_PIDR2_ARCH_GICv3 | 0x0b;
	case GITS_PIDR4:
		return 0x40;	/* This is a 64K software visible page */
	/* The following are the ID registers for (any) GIC. */
	case GITS_CIDR0:
		return 0x0d;
	case GITS_CIDR1:
		return 0xf0;
	case GITS_CIDR2:
		return 0x05;
	case GITS_CIDR3:
		return 0xb1;
	}

	return 0;
}

static struct vgic_irq *__vgic_its_check_cache(struct vgic_dist *dist,
					       phys_addr_t db,
					       u32 devid, u32 eventid)
{
	struct vgic_translation_cache_entry *cte;

	list_for_each_entry(cte, &dist->lpi_translation_cache, entry) {
		/*
		 * If we hit a NULL entry, there is nothing after this
		 * point.
		 */
		if (!cte->irq)
			break;

		if (cte->db != db || cte->devid != devid ||
		    cte->eventid != eventid)
			continue;

		/*
		 * Move this entry to the head, as it is the most
		 * recently used.
		 */
		if (!list_is_first(&cte->entry, &dist->lpi_translation_cache))
			list_move(&cte->entry, &dist->lpi_translation_cache);

		return cte->irq;
	}

	return NULL;
}

static struct vgic_irq *vgic_its_check_cache(struct kvm *kvm, phys_addr_t db,
					     u32 devid, u32 eventid)
{
	struct vgic_dist *dist = &kvm->arch.vgic;
	struct vgic_irq *irq;
	unsigned long flags;

	raw_spin_lock_irqsave(&dist->lpi_list_lock, flags);
	irq = __vgic_its_check_cache(dist, db, devid, eventid);
	raw_spin_unlock_irqrestore(&dist->lpi_list_lock, flags);

	return irq;
}

static void vgic_its_cache_translation(struct kvm *kvm, struct vgic_its *its,
				       u32 devid, u32 eventid,
				       struct vgic_irq *irq)
{
	struct vgic_dist *dist = &kvm->arch.vgic;
	struct vgic_translation_cache_entry *cte;
	unsigned long flags;
	phys_addr_t db;

	/* Do not cache a directly injected interrupt */
	if (irq->hw)
		return;

	raw_spin_lock_irqsave(&dist->lpi_list_lock, flags);

	if (unlikely(list_empty(&dist->lpi_translation_cache)))
		goto out;

	/*
	 * We could have raced with another CPU caching the same
	 * translation behind our back, so let's check it is not
	 * already in there.
	 */
	db = its->vgic_its_base + GITS_TRANSLATER;
	if (__vgic_its_check_cache(dist, db, devid, eventid))
		goto out;

	/* Always reuse the last entry (LRU policy) */
	cte = list_last_entry(&dist->lpi_translation_cache,
			      typeof(*cte), entry);

	/*
	 * Caching the translation implies having an extra reference
	 * to the interrupt, so drop the potential reference on what
	 * was in the cache, and increment it on the new interrupt.
	 */
	if (cte->irq)
		__vgic_put_lpi_locked(kvm, cte->irq);

	vgic_get_irq_kref(irq);

	cte->db		= db;
	cte->devid	= devid;
	cte->eventid	= eventid;
	cte->irq	= irq;

	/* Move the new translation to the head of the list */
	list_move(&cte->entry, &dist->lpi_translation_cache);

out:
	raw_spin_unlock_irqrestore(&dist->lpi_list_lock, flags);
}

void vgic_its_invalidate_cache(struct kvm *kvm)
{
	struct vgic_dist *dist = &kvm->arch.vgic;
	struct vgic_translation_cache_entry *cte;
	unsigned long flags;

	raw_spin_lock_irqsave(&dist->lpi_list_lock, flags);

	list_for_each_entry(cte, &dist->lpi_translation_cache, entry) {
		/*
		 * If we hit a NULL entry, there is nothing after this
		 * point.
		 */
		if (!cte->irq)
			break;

		__vgic_put_lpi_locked(kvm, cte->irq);
		cte->irq = NULL;
	}

	raw_spin_unlock_irqrestore(&dist->lpi_list_lock, flags);
}

int vgic_its_resolve_lpi(struct kvm *kvm, struct vgic_its *its,
			 u32 devid, u32 eventid, struct vgic_irq **irq)
{
	struct kvm_vcpu *vcpu;
	struct its_ite *ite;

	if (!its->enabled)
		return -EBUSY;

	ite = find_ite(its, devid, eventid);
	if (!ite || !its_is_collection_mapped(ite->collection))
		return E_ITS_INT_UNMAPPED_INTERRUPT;

	vcpu = kvm_get_vcpu(kvm, ite->collection->target_addr);
	if (!vcpu)
		return E_ITS_INT_UNMAPPED_INTERRUPT;

	if (!vcpu->arch.vgic_cpu.lpis_enabled)
		return -EBUSY;

	vgic_its_cache_translation(kvm, its, devid, eventid, ite->irq);

	*irq = ite->irq;
	return 0;
}

struct vgic_its *vgic_msi_to_its(struct kvm *kvm, struct kvm_msi *msi)
{
	u64 address;
	struct kvm_io_device *kvm_io_dev;
	struct vgic_io_device *iodev;

	if (!vgic_has_its(kvm))
		return ERR_PTR(-ENODEV);

	if (!(msi->flags & KVM_MSI_VALID_DEVID))
		return ERR_PTR(-EINVAL);

	address = (u64)msi->address_hi << 32 | msi->address_lo;

	kvm_io_dev = kvm_io_bus_get_dev(kvm, KVM_MMIO_BUS, address);
	if (!kvm_io_dev)
		return ERR_PTR(-EINVAL);

	if (kvm_io_dev->ops != &kvm_io_gic_ops)
		return ERR_PTR(-EINVAL);

	iodev = container_of(kvm_io_dev, struct vgic_io_device, dev);
	if (iodev->iodev_type != IODEV_ITS)
		return ERR_PTR(-EINVAL);

	return iodev->its;
}

/*
 * Find the target VCPU and the LPI number for a given devid/eventid pair
 * and make this IRQ pending, possibly injecting it.
 * Must be called with the its_lock mutex held.
 * Returns 0 on success, a positive error value for any ITS mapping
 * related errors and negative error values for generic errors.
 */
static int vgic_its_trigger_msi(struct kvm *kvm, struct vgic_its *its,
				u32 devid, u32 eventid)
{
	struct vgic_irq *irq = NULL;
	unsigned long flags;
	int err;

	err = vgic_its_resolve_lpi(kvm, its, devid, eventid, &irq);
	if (err)
		return err;

	if (irq->hw)
		return irq_set_irqchip_state(irq->host_irq,
					     IRQCHIP_STATE_PENDING, true);

	raw_spin_lock_irqsave(&irq->irq_lock, flags);
	irq->pending_latch = true;
	vgic_queue_irq_unlock(kvm, irq, flags);

	return 0;
}

int vgic_its_inject_cached_translation(struct kvm *kvm, struct kvm_msi *msi)
{
	struct vgic_irq *irq;
	unsigned long flags;
	phys_addr_t db;

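	/*
	 * The MSI address is the doorbell (GITS_TRANSLATER) of the targeted
	 * ITS and forms part of the translation cache key.
	 */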
	db = (u64)msi->address_hi << 32 | msi->address_lo;
	irq = vgic_its_check_cache(kvm, db, msi->devid, msi->data);
	if (!irq)
		return -EWOULDBLOCK;

	raw_spin_lock_irqsave(&irq->irq_lock, flags);
	irq->pending_latch = true;
	vgic_queue_irq_unlock(kvm, irq, flags);

	return 0;
}

/*
 * Queries the KVM IO bus framework to get the ITS pointer from the given
 * doorbell address.
 * We then call vgic_its_trigger_msi() with the decoded data.
 * According to the KVM_SIGNAL_MSI API description, this returns 1 on success.
 */
int vgic_its_inject_msi(struct kvm *kvm, struct kvm_msi *msi)
{
	struct vgic_its *its;
	int ret;

	if (!vgic_its_inject_cached_translation(kvm, msi))
		return 1;

	its = vgic_msi_to_its(kvm, msi);
	if (IS_ERR(its))
		return PTR_ERR(its);

	mutex_lock(&its->its_lock);
	ret = vgic_its_trigger_msi(kvm, its, msi->devid, msi->data);
	mutex_unlock(&its->its_lock);

	if (ret < 0)
		return ret;

	/*
	 * KVM_SIGNAL_MSI demands a return value > 0 for success and 0
	 * if the guest has blocked the MSI. So we map any LPI mapping
	 * related error to that.
	 */
	if (ret)
		return 0;
	else
		return 1;
}

/* Requires the its_lock to be held. */
static void its_free_ite(struct kvm *kvm, struct its_ite *ite)
{
	list_del(&ite->ite_list);

	/* This put matches the get in vgic_add_lpi. */
	if (ite->irq) {
		if (ite->irq->hw)
			WARN_ON(its_unmap_vlpi(ite->irq->host_irq));

		vgic_put_irq(kvm, ite->irq);
	}

	kfree(ite);
}

static u64 its_cmd_mask_field(u64 *its_cmd, int word, int shift, int size)
{
	return (le64_to_cpu(its_cmd[word]) >> shift) & (BIT_ULL(size) - 1);
}

^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 827) #define its_cmd_get_command(cmd) its_cmd_mask_field(cmd, 0, 0, 8)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 828) #define its_cmd_get_deviceid(cmd) its_cmd_mask_field(cmd, 0, 32, 32)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 829) #define its_cmd_get_size(cmd) (its_cmd_mask_field(cmd, 1, 0, 5) + 1)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 830) #define its_cmd_get_id(cmd) its_cmd_mask_field(cmd, 1, 0, 32)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 831) #define its_cmd_get_physical_id(cmd) its_cmd_mask_field(cmd, 1, 32, 32)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 832) #define its_cmd_get_collection(cmd) its_cmd_mask_field(cmd, 2, 0, 16)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 833) #define its_cmd_get_ittaddr(cmd) (its_cmd_mask_field(cmd, 2, 8, 44) << 8)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 834) #define its_cmd_get_target_addr(cmd) its_cmd_mask_field(cmd, 2, 16, 32)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 835) #define its_cmd_get_validbit(cmd) its_cmd_mask_field(cmd, 2, 63, 1)
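
/*
 * For orientation, a MAPTI command as decoded by the accessors above spans
 * four little-endian doublewords (sketch only; the GICv3 spec is
 * authoritative):
 *
 *	DW0: [ 7: 0] command (0x0a)	[63:32] DeviceID
 *	DW1: [31: 0] EventID		[63:32] pINTID
 *	DW2: [15: 0] ICID
 */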
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 836)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 837) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 838) * The DISCARD command frees an Interrupt Translation Table Entry (ITTE).
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 839) * Must be called with the its_lock mutex held.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 840) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 841) static int vgic_its_cmd_handle_discard(struct kvm *kvm, struct vgic_its *its,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 842) u64 *its_cmd)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 843) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 844) u32 device_id = its_cmd_get_deviceid(its_cmd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 845) u32 event_id = its_cmd_get_id(its_cmd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 846) struct its_ite *ite;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 847)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 848) ite = find_ite(its, device_id, event_id);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 849) if (ite && its_is_collection_mapped(ite->collection)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 850) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 851) * Though the spec talks about removing the pending state, we
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 852) * don't bother here since we clear the ITTE anyway and the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 853) * pending state is a property of the ITTE struct.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 854) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 855) vgic_its_invalidate_cache(kvm);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 856)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 857) its_free_ite(kvm, ite);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 858) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 859) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 860)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 861) return E_ITS_DISCARD_UNMAPPED_INTERRUPT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 862) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 863)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 864) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 865) * The MOVI command moves an ITTE to a different collection.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 866) * Must be called with the its_lock mutex held.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 867) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 868) static int vgic_its_cmd_handle_movi(struct kvm *kvm, struct vgic_its *its,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 869) u64 *its_cmd)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 870) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 871) u32 device_id = its_cmd_get_deviceid(its_cmd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 872) u32 event_id = its_cmd_get_id(its_cmd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 873) u32 coll_id = its_cmd_get_collection(its_cmd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 874) struct kvm_vcpu *vcpu;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 875) struct its_ite *ite;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 876) struct its_collection *collection;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 877)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 878) ite = find_ite(its, device_id, event_id);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 879) if (!ite)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 880) return E_ITS_MOVI_UNMAPPED_INTERRUPT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 881)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 882) if (!its_is_collection_mapped(ite->collection))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 883) return E_ITS_MOVI_UNMAPPED_COLLECTION;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 884)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 885) collection = find_collection(its, coll_id);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 886) if (!its_is_collection_mapped(collection))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 887) return E_ITS_MOVI_UNMAPPED_COLLECTION;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 888)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 889) ite->collection = collection;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 890) vcpu = kvm_get_vcpu(kvm, collection->target_addr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 891)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 892) vgic_its_invalidate_cache(kvm);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 893)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 894) return update_affinity(ite->irq, vcpu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 895) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 896)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 897) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 898) * Check whether an ID can be stored into the corresponding guest table.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 899) * For a direct table this is pretty easy, but gets a bit nasty for
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 900) * indirect tables. We check whether the resulting guest physical address
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 901) * is actually valid (covered by a memslot and guest accessible).
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 902) * For this we have to read the respective first level entry.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 903) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 904) static bool vgic_its_check_id(struct vgic_its *its, u64 baser, u32 id,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 905) gpa_t *eaddr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 906) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 907) int l1_tbl_size = GITS_BASER_NR_PAGES(baser) * SZ_64K;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 908) u64 indirect_ptr, type = GITS_BASER_TYPE(baser);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 909) phys_addr_t base = GITS_BASER_ADDR_48_to_52(baser);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 910) int esz = GITS_BASER_ENTRY_SIZE(baser);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 911) int index, idx;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 912) gfn_t gfn;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 913) bool ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 914)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 915) switch (type) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 916) case GITS_BASER_TYPE_DEVICE:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 917) if (id >= BIT_ULL(VITS_TYPER_DEVBITS))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 918) return false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 919) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 920) case GITS_BASER_TYPE_COLLECTION:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 921) /* as GITS_TYPER.CIL == 0, ITS supports 16-bit collection ID */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 922) if (id >= BIT_ULL(16))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 923) return false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 924) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 925) default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 926) return false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 927) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 928)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 929) if (!(baser & GITS_BASER_INDIRECT)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 930) phys_addr_t addr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 931)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 932) if (id >= (l1_tbl_size / esz))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 933) return false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 934)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 935) addr = base + id * esz;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 936) gfn = addr >> PAGE_SHIFT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 937)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 938) if (eaddr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 939) *eaddr = addr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 940)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 941) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 942) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 943)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 944) /* calculate and check the index into the 1st level */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 945) index = id / (SZ_64K / esz);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 946) if (index >= (l1_tbl_size / sizeof(u64)))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 947) return false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 948)
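	/*
	 * Worked example, assuming an 8-byte entry size: each 64K L2 page
	 * then holds 65536 / 8 = 8192 entries, so ID 20000 selects L1 slot
	 * 20000 / 8192 = 2 and lands (20000 % 8192) * 8 = 28928 bytes into
	 * the L2 page that slot points to.
	 */
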
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 949) /* Each 1st level entry is represented by a 64-bit value. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 950) if (kvm_read_guest_lock(its->dev->kvm,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 951) base + index * sizeof(indirect_ptr),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 952) &indirect_ptr, sizeof(indirect_ptr)))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 953) return false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 954)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 955) indirect_ptr = le64_to_cpu(indirect_ptr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 956)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 957) /* check the valid bit of the first level entry */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 958) if (!(indirect_ptr & BIT_ULL(63)))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 959) return false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 960)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 961) /* Mask the guest physical address and calculate the frame number. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 962) indirect_ptr &= GENMASK_ULL(51, 16);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 963)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 964) /* Find the address of the actual entry */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 965) index = id % (SZ_64K / esz);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 966) indirect_ptr += index * esz;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 967) gfn = indirect_ptr >> PAGE_SHIFT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 968)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 969) if (eaddr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 970) *eaddr = indirect_ptr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 971)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 972) out:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 973) idx = srcu_read_lock(&its->dev->kvm->srcu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 974) ret = kvm_is_visible_gfn(its->dev->kvm, gfn);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 975) srcu_read_unlock(&its->dev->kvm->srcu, idx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 976) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 977) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 978)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 979) static int vgic_its_alloc_collection(struct vgic_its *its,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 980) struct its_collection **colp,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 981) u32 coll_id)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 982) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 983) struct its_collection *collection;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 984)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 985) if (!vgic_its_check_id(its, its->baser_coll_table, coll_id, NULL))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 986) return E_ITS_MAPC_COLLECTION_OOR;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 987)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 988) collection = kzalloc(sizeof(*collection), GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 989) if (!collection)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 990) return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 991)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 992) collection->collection_id = coll_id;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 993) collection->target_addr = COLLECTION_NOT_MAPPED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 994)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 995) list_add_tail(&collection->coll_list, &its->collection_list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 996) *colp = collection;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 997)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 998) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 999) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1000)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1001) static void vgic_its_free_collection(struct vgic_its *its, u32 coll_id)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1002) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1003) struct its_collection *collection;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1004) struct its_device *device;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1005) struct its_ite *ite;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1006)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1007) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1008) * Clearing the mapping for that collection ID removes the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1009) * entry from the list. If there wasn't any before, we can
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1010) * go home early.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1011) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1012) collection = find_collection(its, coll_id);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1013) if (!collection)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1014) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1015)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1016) for_each_lpi_its(device, ite, its)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1017) if (ite->collection &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1018) ite->collection->collection_id == coll_id)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1019) ite->collection = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1020)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1021) list_del(&collection->coll_list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1022) kfree(collection);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1023) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1024)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1025) /* Must be called with its_lock mutex held */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1026) static struct its_ite *vgic_its_alloc_ite(struct its_device *device,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1027) struct its_collection *collection,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1028) u32 event_id)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1029) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1030) struct its_ite *ite;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1031)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1032) ite = kzalloc(sizeof(*ite), GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1033) if (!ite)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1034) return ERR_PTR(-ENOMEM);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1035)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1036) ite->event_id = event_id;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1037) ite->collection = collection;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1038)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1039) list_add_tail(&ite->ite_list, &device->itt_head);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1040) return ite;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1041) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1042)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1043) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1044) * The MAPTI and MAPI commands map LPIs to ITTEs.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1045) * Must be called with its_lock mutex held.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1046) */
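/*
 * MAPI is effectively MAPTI with an implicit pINTID equal to the EventID,
 * which is what the GITS_CMD_MAPTI check below implements.
 */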
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1047) static int vgic_its_cmd_handle_mapi(struct kvm *kvm, struct vgic_its *its,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1048) u64 *its_cmd)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1049) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1050) u32 device_id = its_cmd_get_deviceid(its_cmd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1051) u32 event_id = its_cmd_get_id(its_cmd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1052) u32 coll_id = its_cmd_get_collection(its_cmd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1053) struct its_ite *ite;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1054) struct kvm_vcpu *vcpu = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1055) struct its_device *device;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1056) struct its_collection *collection, *new_coll = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1057) struct vgic_irq *irq;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1058) int lpi_nr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1059)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1060) device = find_its_device(its, device_id);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1061) if (!device)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1062) return E_ITS_MAPTI_UNMAPPED_DEVICE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1063)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1064) if (event_id >= BIT_ULL(device->num_eventid_bits))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1065) return E_ITS_MAPTI_ID_OOR;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1066)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1067) if (its_cmd_get_command(its_cmd) == GITS_CMD_MAPTI)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1068) lpi_nr = its_cmd_get_physical_id(its_cmd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1069) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1070) lpi_nr = event_id;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1071) if (lpi_nr < GIC_LPI_OFFSET ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1072) lpi_nr >= max_lpis_propbaser(kvm->arch.vgic.propbaser))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1073) return E_ITS_MAPTI_PHYSICALID_OOR;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1074)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1075) /* If there is an existing mapping, behaviour is UNPREDICTABLE. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1076) if (find_ite(its, device_id, event_id))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1077) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1078)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1079) collection = find_collection(its, coll_id);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1080) if (!collection) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1081) int ret = vgic_its_alloc_collection(its, &collection, coll_id);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1082) if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1083) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1084) new_coll = collection;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1085) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1086)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1087) ite = vgic_its_alloc_ite(device, collection, event_id);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1088) if (IS_ERR(ite)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1089) if (new_coll)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1090) vgic_its_free_collection(its, coll_id);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1091) return PTR_ERR(ite);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1092) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1093)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1094) if (its_is_collection_mapped(collection))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1095) vcpu = kvm_get_vcpu(kvm, collection->target_addr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1096)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1097) irq = vgic_add_lpi(kvm, lpi_nr, vcpu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1098) if (IS_ERR(irq)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1099) if (new_coll)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1100) vgic_its_free_collection(its, coll_id);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1101) its_free_ite(kvm, ite);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1102) return PTR_ERR(irq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1103) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1104) ite->irq = irq;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1105)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1106) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1107) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1108)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1109) /* Requires the its_lock to be held. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1110) static void vgic_its_free_device(struct kvm *kvm, struct its_device *device)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1111) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1112) struct its_ite *ite, *temp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1113)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1114) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1115) * The spec says that unmapping a device that still has valid
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1116) * ITTEs associated with it is UNPREDICTABLE. We remove all ITTEs,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1117) * since leaving them around would leak their memory.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1118) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1119) list_for_each_entry_safe(ite, temp, &device->itt_head, ite_list)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1120) its_free_ite(kvm, ite);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1121)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1122) vgic_its_invalidate_cache(kvm);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1123)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1124) list_del(&device->dev_list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1125) kfree(device);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1126) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1127)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1128) /* its lock must be held */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1129) static void vgic_its_free_device_list(struct kvm *kvm, struct vgic_its *its)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1130) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1131) struct its_device *cur, *temp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1132)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1133) list_for_each_entry_safe(cur, temp, &its->device_list, dev_list)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1134) vgic_its_free_device(kvm, cur);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1135) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1136)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1137) /* its lock must be held */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1138) static void vgic_its_free_collection_list(struct kvm *kvm, struct vgic_its *its)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1139) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1140) struct its_collection *cur, *temp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1141)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1142) list_for_each_entry_safe(cur, temp, &its->collection_list, coll_list)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1143) vgic_its_free_collection(its, cur->collection_id);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1144) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1145)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1146) /* Must be called with its_lock mutex held */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1147) static struct its_device *vgic_its_alloc_device(struct vgic_its *its,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1148) u32 device_id, gpa_t itt_addr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1149) u8 num_eventid_bits)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1150) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1151) struct its_device *device;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1152)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1153) device = kzalloc(sizeof(*device), GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1154) if (!device)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1155) return ERR_PTR(-ENOMEM);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1156)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1157) device->device_id = device_id;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1158) device->itt_addr = itt_addr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1159) device->num_eventid_bits = num_eventid_bits;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1160) INIT_LIST_HEAD(&device->itt_head);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1161)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1162) list_add_tail(&device->dev_list, &its->device_list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1163) return device;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1164) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1165)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1166) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1167) * MAPD maps or unmaps a device ID to Interrupt Translation Tables (ITTs).
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1168) * Must be called with the its_lock mutex held.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1169) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1170) static int vgic_its_cmd_handle_mapd(struct kvm *kvm, struct vgic_its *its,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1171) u64 *its_cmd)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1172) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1173) u32 device_id = its_cmd_get_deviceid(its_cmd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1174) bool valid = its_cmd_get_validbit(its_cmd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1175) u8 num_eventid_bits = its_cmd_get_size(its_cmd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1176) gpa_t itt_addr = its_cmd_get_ittaddr(its_cmd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1177) struct its_device *device;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1178)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1179) if (!vgic_its_check_id(its, its->baser_device_table, device_id, NULL))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1180) return E_ITS_MAPD_DEVICE_OOR;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1181)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1182) if (valid && num_eventid_bits > VITS_TYPER_IDBITS)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1183) return E_ITS_MAPD_ITTSIZE_OOR;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1184)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1185) device = find_its_device(its, device_id);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1186)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1187) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1188) * The spec says that calling MAPD on an already mapped device
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1189) * invalidates all cached data for this device. We implement this
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1190) * by removing the mapping and re-establishing it.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1191) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1192) if (device)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1193) vgic_its_free_device(kvm, device);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1194)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1195) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1196) * The spec does not say whether unmapping a not-mapped device
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1197) * is an error, so we are done in any case.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1198) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1199) if (!valid)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1200) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1201)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1202) device = vgic_its_alloc_device(its, device_id, itt_addr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1203) num_eventid_bits);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1204)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1205) return PTR_ERR_OR_ZERO(device);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1206) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1207)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1208) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1209) * The MAPC command maps collection IDs to redistributors.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1210) * Must be called with the its_lock mutex held.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1211) */
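/*
 * Since this emulation advertises GITS_TYPER.PTA == 0, the target address
 * decoded below is a linear processor number (used as a vCPU index) rather
 * than a physical redistributor base address.
 */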
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1212) static int vgic_its_cmd_handle_mapc(struct kvm *kvm, struct vgic_its *its,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1213) u64 *its_cmd)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1214) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1215) u16 coll_id;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1216) u32 target_addr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1217) struct its_collection *collection;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1218) bool valid;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1219)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1220) valid = its_cmd_get_validbit(its_cmd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1221) coll_id = its_cmd_get_collection(its_cmd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1222) target_addr = its_cmd_get_target_addr(its_cmd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1223)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1224) if (target_addr >= atomic_read(&kvm->online_vcpus))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1225) return E_ITS_MAPC_PROCNUM_OOR;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1226)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1227) if (!valid) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1228) vgic_its_free_collection(its, coll_id);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1229) vgic_its_invalidate_cache(kvm);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1230) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1231) collection = find_collection(its, coll_id);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1232)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1233) if (!collection) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1234) int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1235)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1236) ret = vgic_its_alloc_collection(its, &collection,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1237) coll_id);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1238) if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1239) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1240) collection->target_addr = target_addr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1241) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1242) collection->target_addr = target_addr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1243) update_affinity_collection(kvm, its, collection);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1244) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1245) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1246)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1247) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1248) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1249)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1250) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1251) * The CLEAR command removes the pending state for a particular LPI.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1252) * Must be called with the its_lock mutex held.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1253) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1254) static int vgic_its_cmd_handle_clear(struct kvm *kvm, struct vgic_its *its,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1255) u64 *its_cmd)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1256) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1257) u32 device_id = its_cmd_get_deviceid(its_cmd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1258) u32 event_id = its_cmd_get_id(its_cmd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1259) struct its_ite *ite;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1260)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1262) ite = find_ite(its, device_id, event_id);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1263) if (!ite)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1264) return E_ITS_CLEAR_UNMAPPED_INTERRUPT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1265)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1266) ite->irq->pending_latch = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1267)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1268) if (ite->irq->hw)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1269) return irq_set_irqchip_state(ite->irq->host_irq,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1270) IRQCHIP_STATE_PENDING, false);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1271)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1272) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1273) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1274)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1275) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1276) * The INV command syncs the configuration bits from the memory table.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1277) * Must be called with the its_lock mutex held.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1278) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1279) static int vgic_its_cmd_handle_inv(struct kvm *kvm, struct vgic_its *its,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1280) u64 *its_cmd)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1281) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1282) u32 device_id = its_cmd_get_deviceid(its_cmd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1283) u32 event_id = its_cmd_get_id(its_cmd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1284) struct its_ite *ite;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1285)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1287) ite = find_ite(its, device_id, event_id);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1288) if (!ite)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1289) return E_ITS_INV_UNMAPPED_INTERRUPT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1290)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1291) return update_lpi_config(kvm, ite->irq, NULL, true);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1292) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1293)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1294) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1295) * The INVALL command requests flushing of all IRQ data in this collection.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1296) * Find the VCPU mapped to that collection, then iterate over the VM's list
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1297) * of mapped LPIs and update the configuration for each IRQ which targets
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1298) * the specified vcpu. The configuration will be read from the in-memory
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1299) * configuration table.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1300) * Must be called with the its_lock mutex held.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1301) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1302) static int vgic_its_cmd_handle_invall(struct kvm *kvm, struct vgic_its *its,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1303) u64 *its_cmd)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1304) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1305) u32 coll_id = its_cmd_get_collection(its_cmd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1306) struct its_collection *collection;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1307) struct kvm_vcpu *vcpu;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1308) struct vgic_irq *irq;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1309) u32 *intids;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1310) int irq_count, i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1311)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1312) collection = find_collection(its, coll_id);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1313) if (!its_is_collection_mapped(collection))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1314) return E_ITS_INVALL_UNMAPPED_COLLECTION;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1315)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1316) vcpu = kvm_get_vcpu(kvm, collection->target_addr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1317)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1318) irq_count = vgic_copy_lpi_list(kvm, vcpu, &intids);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1319) if (irq_count < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1320) return irq_count;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1321)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1322) for (i = 0; i < irq_count; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1323) irq = vgic_get_irq(kvm, NULL, intids[i]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1324) if (!irq)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1325) continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1326) update_lpi_config(kvm, irq, vcpu, false);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1327) vgic_put_irq(kvm, irq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1328) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1329)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1330) kfree(intids);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1331)
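	/* For a GICv4 vPE, also propagate the invalidation to the hardware ITS. */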
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1332) if (vcpu->arch.vgic_cpu.vgic_v3.its_vpe.its_vm)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1333) its_invall_vpe(&vcpu->arch.vgic_cpu.vgic_v3.its_vpe);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1334)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1335) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1336) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1337)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1338) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1339) * The MOVALL command moves the pending state of all IRQs targeting one
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1340) * redistributor to another. We don't hold the pending state in the VCPUs,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1341) * but in the IRQs instead, so there is really not much to do for us here.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1342) * However the spec says that no IRQ must target the old redistributor
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1343) * afterwards, so we make sure that no LPI is using the associated target_vcpu.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1344) * This command affects all LPIs in the system that target that redistributor.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1345) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1346) static int vgic_its_cmd_handle_movall(struct kvm *kvm, struct vgic_its *its,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1347) u64 *its_cmd)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1348) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1349) u32 target1_addr = its_cmd_get_target_addr(its_cmd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1350) u32 target2_addr = its_cmd_mask_field(its_cmd, 3, 16, 32);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1351) struct kvm_vcpu *vcpu1, *vcpu2;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1352) struct vgic_irq *irq;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1353) u32 *intids;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1354) int irq_count, i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1355)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1356) if (target1_addr >= atomic_read(&kvm->online_vcpus) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1357) target2_addr >= atomic_read(&kvm->online_vcpus))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1358) return E_ITS_MOVALL_PROCNUM_OOR;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1359)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1360) if (target1_addr == target2_addr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1361) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1362)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1363) vcpu1 = kvm_get_vcpu(kvm, target1_addr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1364) vcpu2 = kvm_get_vcpu(kvm, target2_addr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1365)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1366) irq_count = vgic_copy_lpi_list(kvm, vcpu1, &intids);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1367) if (irq_count < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1368) return irq_count;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1369)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1370) for (i = 0; i < irq_count; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1371) irq = vgic_get_irq(kvm, NULL, intids[i]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1372)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1373) update_affinity(irq, vcpu2);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1374)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1375) vgic_put_irq(kvm, irq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1376) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1377)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1378) vgic_its_invalidate_cache(kvm);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1379)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1380) kfree(intids);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1381) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1382) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1383)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1384) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1385) * The INT command injects the LPI associated with that DevID/EvID pair.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1386) * Must be called with the its_lock mutex held.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1387) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1388) static int vgic_its_cmd_handle_int(struct kvm *kvm, struct vgic_its *its,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1389) u64 *its_cmd)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1390) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1391) u32 msi_data = its_cmd_get_id(its_cmd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1392) u64 msi_devid = its_cmd_get_deviceid(its_cmd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1393)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1394) return vgic_its_trigger_msi(kvm, its, msi_devid, msi_data);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1395) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1396)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1397) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1398) * This function is called with the its_cmd lock held, but the ITS data
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1399) * structure lock dropped.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1400) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1401) static int vgic_its_handle_command(struct kvm *kvm, struct vgic_its *its,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1402) u64 *its_cmd)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1403) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1404) int ret = -ENODEV;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1405)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1406) mutex_lock(&its->its_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1407) switch (its_cmd_get_command(its_cmd)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1408) case GITS_CMD_MAPD:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1409) ret = vgic_its_cmd_handle_mapd(kvm, its, its_cmd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1410) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1411) case GITS_CMD_MAPC:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1412) ret = vgic_its_cmd_handle_mapc(kvm, its, its_cmd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1413) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1414) case GITS_CMD_MAPI:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1415) ret = vgic_its_cmd_handle_mapi(kvm, its, its_cmd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1416) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1417) case GITS_CMD_MAPTI:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1418) ret = vgic_its_cmd_handle_mapi(kvm, its, its_cmd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1419) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1420) case GITS_CMD_MOVI:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1421) ret = vgic_its_cmd_handle_movi(kvm, its, its_cmd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1422) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1423) case GITS_CMD_DISCARD:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1424) ret = vgic_its_cmd_handle_discard(kvm, its, its_cmd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1425) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1426) case GITS_CMD_CLEAR:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1427) ret = vgic_its_cmd_handle_clear(kvm, its, its_cmd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1428) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1429) case GITS_CMD_MOVALL:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1430) ret = vgic_its_cmd_handle_movall(kvm, its, its_cmd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1431) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1432) case GITS_CMD_INT:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1433) ret = vgic_its_cmd_handle_int(kvm, its, its_cmd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1434) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1435) case GITS_CMD_INV:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1436) ret = vgic_its_cmd_handle_inv(kvm, its, its_cmd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1437) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1438) case GITS_CMD_INVALL:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1439) ret = vgic_its_cmd_handle_invall(kvm, its, its_cmd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1440) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1441) case GITS_CMD_SYNC:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1442) /* we ignore this command: we are in sync all of the time */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1443) ret = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1444) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1445) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1446) mutex_unlock(&its->its_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1447)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1448) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1449) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1450)
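/*
 * Each sanitiser below relies on vgic_sanitise_field(), a shared vGIC
 * helper that extracts the masked field, runs it through the callback and
 * merges the result back in, roughly:
 *
 *	u64 field = (reg & mask) >> shift;
 *	reg = (reg & ~mask) | (fn(field) << shift);
 */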
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1451) static u64 vgic_sanitise_its_baser(u64 reg)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1452) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1453) reg = vgic_sanitise_field(reg, GITS_BASER_SHAREABILITY_MASK,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1454) GITS_BASER_SHAREABILITY_SHIFT,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1455) vgic_sanitise_shareability);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1456) reg = vgic_sanitise_field(reg, GITS_BASER_INNER_CACHEABILITY_MASK,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1457) GITS_BASER_INNER_CACHEABILITY_SHIFT,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1458) vgic_sanitise_inner_cacheability);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1459) reg = vgic_sanitise_field(reg, GITS_BASER_OUTER_CACHEABILITY_MASK,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1460) GITS_BASER_OUTER_CACHEABILITY_SHIFT,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1461) vgic_sanitise_outer_cacheability);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1462)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1463) /* We support only one (ITS) page size: 64K */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1464) reg = (reg & ~GITS_BASER_PAGE_SIZE_MASK) | GITS_BASER_PAGE_SIZE_64K;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1465)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1466) return reg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1467) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1468)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1469) static u64 vgic_sanitise_its_cbaser(u64 reg)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1470) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1471) reg = vgic_sanitise_field(reg, GITS_CBASER_SHAREABILITY_MASK,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1472) GITS_CBASER_SHAREABILITY_SHIFT,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1473) vgic_sanitise_shareability);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1474) reg = vgic_sanitise_field(reg, GITS_CBASER_INNER_CACHEABILITY_MASK,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1475) GITS_CBASER_INNER_CACHEABILITY_SHIFT,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1476) vgic_sanitise_inner_cacheability);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1477) reg = vgic_sanitise_field(reg, GITS_CBASER_OUTER_CACHEABILITY_MASK,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1478) GITS_CBASER_OUTER_CACHEABILITY_SHIFT,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1479) vgic_sanitise_outer_cacheability);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1480)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1481) /* Sanitise the physical address to be 64K aligned. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1482) reg &= ~GENMASK_ULL(15, 12);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1483)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1484) return reg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1485) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1486)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1487) static unsigned long vgic_mmio_read_its_cbaser(struct kvm *kvm,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1488) struct vgic_its *its,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1489) gpa_t addr, unsigned int len)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1490) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1491) return extract_bytes(its->cbaser, addr & 7, len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1492) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1493)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1494) static void vgic_mmio_write_its_cbaser(struct kvm *kvm, struct vgic_its *its,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1495) gpa_t addr, unsigned int len,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1496) unsigned long val)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1497) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1498) /* When GITS_CTLR.Enable is 1, this register is RO. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1499) if (its->enabled)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1500) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1501)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1502) mutex_lock(&its->cmd_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1503) its->cbaser = update_64bit_reg(its->cbaser, addr & 7, len, val);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1504) its->cbaser = vgic_sanitise_its_cbaser(its->cbaser);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1505) its->creadr = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1506) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1507) * CWRITER is architecturally UNKNOWN on reset, but we need to reset
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1508) * it to CREADR to make sure we start with an empty command buffer.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1509) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1510) its->cwriter = its->creadr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1511) mutex_unlock(&its->cmd_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1512) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1513)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1514) #define ITS_CMD_BUFFER_SIZE(baser) ((((baser) & 0xff) + 1) << 12)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1515) #define ITS_CMD_SIZE 32
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1516) #define ITS_CMD_OFFSET(reg) ((reg) & GENMASK(19, 5))
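
/*
 * Example: CBASER[7:0] holds the number of 4K pages minus one, so the
 * value 0 yields a 4KiB ring of 4096 / 32 = 128 commands and the maximum
 * of 255 yields 1MiB; ITS_CMD_OFFSET() accordingly keeps 32-byte aligned
 * offsets below 2^20.
 */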
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1517)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1518) /* Must be called with the cmd_lock held. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1519) static void vgic_its_process_commands(struct kvm *kvm, struct vgic_its *its)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1520) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1521) gpa_t cbaser;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1522) u64 cmd_buf[4];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1523)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1524) /* Commands are only processed when the ITS is enabled. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1525) if (!its->enabled)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1526) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1527)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1528) cbaser = GITS_CBASER_ADDRESS(its->cbaser);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1529)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1530) while (its->cwriter != its->creadr) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1531) int ret = kvm_read_guest_lock(kvm, cbaser + its->creadr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1532) cmd_buf, ITS_CMD_SIZE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1533) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1534) * If kvm_read_guest_lock() fails, this could be due to the guest
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1535) * programming a bogus value in CBASER or something else going
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1536) * wrong from which we cannot easily recover.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1537) * According to section 6.3.2 in the GICv3 spec we can just
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1538) * ignore the command in that case.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1539) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1540) if (!ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1541) vgic_its_handle_command(kvm, its, cmd_buf);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1542)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1543) its->creadr += ITS_CMD_SIZE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1544) if (its->creadr == ITS_CMD_BUFFER_SIZE(its->cbaser))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1545) its->creadr = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1546) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1547) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1548)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1549) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1550) * By writing to CWRITER the guest announces new commands to be processed.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1551) * To avoid any races in the first place, we take the its_cmd lock, which
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1552) * protects our ring buffer variables, so that there is only one user
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1553) * per ITS handling commands at a given time.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1554) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1555) static void vgic_mmio_write_its_cwriter(struct kvm *kvm, struct vgic_its *its,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1556) gpa_t addr, unsigned int len,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1557) unsigned long val)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1558) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1559) u64 reg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1560)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1561) if (!its)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1562) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1563)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1564) mutex_lock(&its->cmd_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1565)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1566) reg = update_64bit_reg(its->cwriter, addr & 7, len, val);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1567) reg = ITS_CMD_OFFSET(reg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1568) if (reg >= ITS_CMD_BUFFER_SIZE(its->cbaser)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1569) mutex_unlock(&its->cmd_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1570) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1571) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1572) its->cwriter = reg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1573)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1574) vgic_its_process_commands(kvm, its);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1575)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1576) mutex_unlock(&its->cmd_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1577) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1578)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1579) static unsigned long vgic_mmio_read_its_cwriter(struct kvm *kvm,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1580) struct vgic_its *its,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1581) gpa_t addr, unsigned int len)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1582) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1583) return extract_bytes(its->cwriter, addr & 0x7, len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1584) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1585)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1586) static unsigned long vgic_mmio_read_its_creadr(struct kvm *kvm,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1587) struct vgic_its *its,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1588) gpa_t addr, unsigned int len)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1589) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1590) return extract_bytes(its->creadr, addr & 0x7, len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1591) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1592)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1593) static int vgic_mmio_uaccess_write_its_creadr(struct kvm *kvm,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1594) struct vgic_its *its,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1595) gpa_t addr, unsigned int len,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1596) unsigned long val)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1597) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1598) u32 cmd_offset;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1599) int ret = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1600)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1601) mutex_lock(&its->cmd_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1602)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1603) if (its->enabled) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1604) ret = -EBUSY;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1605) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1606) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1607)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1608) cmd_offset = ITS_CMD_OFFSET(val);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1609) if (cmd_offset >= ITS_CMD_BUFFER_SIZE(its->cbaser)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1610) ret = -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1611) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1612) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1613)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1614) its->creadr = cmd_offset;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1615) out:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1616) mutex_unlock(&its->cmd_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1617) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1618) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1619)
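/*
 * The guest sees a bank of eight 64-bit GITS_BASER<n> registers;
 * BASER_INDEX() turns an offset into that bank into a register index.
 * Only index 0 (device table) and index 1 (collection table) are backed
 * by state; the remaining registers read as zero and ignore writes.
 */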
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1620) #define BASER_INDEX(addr) (((addr) / sizeof(u64)) & 0x7)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1621) static unsigned long vgic_mmio_read_its_baser(struct kvm *kvm,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1622) struct vgic_its *its,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1623) gpa_t addr, unsigned int len)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1624) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1625) u64 reg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1626)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1627) switch (BASER_INDEX(addr)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1628) case 0:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1629) reg = its->baser_device_table;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1630) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1631) case 1:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1632) reg = its->baser_coll_table;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1633) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1634) default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1635) reg = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1636) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1637) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1638)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1639) return extract_bytes(reg, addr & 7, len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1640) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1641)
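/*
 * Bits [52:48] (entry size) and [58:56] (table type) of GITS_BASER are
 * read-only for the guest: mask them out of incoming writes and
 * re-insert the values we emulate.
 */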
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1642) #define GITS_BASER_RO_MASK (GENMASK_ULL(52, 48) | GENMASK_ULL(58, 56))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1643) static void vgic_mmio_write_its_baser(struct kvm *kvm,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1644) struct vgic_its *its,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1645) gpa_t addr, unsigned int len,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1646) unsigned long val)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1647) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1648) const struct vgic_its_abi *abi = vgic_its_get_abi(its);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1649) u64 entry_size, table_type;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1650) u64 reg, *regptr, clearbits = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1651)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1652) /* When GITS_CTLR.Enable is 1, we ignore write accesses. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1653) if (its->enabled)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1654) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1655)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1656) switch (BASER_INDEX(addr)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1657) case 0:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1658) regptr = &its->baser_device_table;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1659) entry_size = abi->dte_esz;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1660) table_type = GITS_BASER_TYPE_DEVICE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1661) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1662) case 1:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1663) regptr = &its->baser_coll_table;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1664) entry_size = abi->cte_esz;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1665) table_type = GITS_BASER_TYPE_COLLECTION;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1666) clearbits = GITS_BASER_INDIRECT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1667) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1668) default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1669) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1670) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1671)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1672) reg = update_64bit_reg(*regptr, addr & 7, len, val);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1673) reg &= ~GITS_BASER_RO_MASK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1674) reg &= ~clearbits;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1675)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1676) reg |= (entry_size - 1) << GITS_BASER_ENTRY_SIZE_SHIFT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1677) reg |= table_type << GITS_BASER_TYPE_SHIFT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1678) reg = vgic_sanitise_its_baser(reg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1679)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1680) *regptr = reg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1681)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1682) if (!(reg & GITS_BASER_VALID)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1683) /* Take the its_lock to prevent a race with a save/restore */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1684) mutex_lock(&its->its_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1685) switch (table_type) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1686) case GITS_BASER_TYPE_DEVICE:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1687) vgic_its_free_device_list(kvm, its);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1688) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1689) case GITS_BASER_TYPE_COLLECTION:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1690) vgic_its_free_collection_list(kvm, its);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1691) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1692) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1693) mutex_unlock(&its->its_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1694) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1695) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1696)
static unsigned long vgic_mmio_read_its_ctlr(struct kvm *kvm,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1698) struct vgic_its *its,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1699) gpa_t addr, unsigned int len)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1700) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1701) u32 reg = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1702)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1703) mutex_lock(&its->cmd_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1704) if (its->creadr == its->cwriter)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1705) reg |= GITS_CTLR_QUIESCENT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1706) if (its->enabled)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1707) reg |= GITS_CTLR_ENABLE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1708) mutex_unlock(&its->cmd_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1709)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1710) return reg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1711) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1712)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1713) static void vgic_mmio_write_its_ctlr(struct kvm *kvm, struct vgic_its *its,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1714) gpa_t addr, unsigned int len,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1715) unsigned long val)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1716) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1717) mutex_lock(&its->cmd_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1718)
	/*
	 * It is UNPREDICTABLE to enable the ITS while any of CBASER or
	 * the device/collection BASER registers are invalid.
	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1723) if (!its->enabled && (val & GITS_CTLR_ENABLE) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1724) (!(its->baser_device_table & GITS_BASER_VALID) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1725) !(its->baser_coll_table & GITS_BASER_VALID) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1726) !(its->cbaser & GITS_CBASER_VALID)))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1727) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1728)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1729) its->enabled = !!(val & GITS_CTLR_ENABLE);
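	/*
	 * Once the ITS is disabled, the guest is free to rewrite its
	 * tables before re-enabling it, so any cached translations may
	 * become stale. Drop them all now.
	 */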
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1730) if (!its->enabled)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1731) vgic_its_invalidate_cache(kvm);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1732)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1733) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1734) * Try to process any pending commands. This function bails out early
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1735) * if the ITS is disabled or no commands have been queued.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1736) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1737) vgic_its_process_commands(kvm, its);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1738)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1739) out:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1740) mutex_unlock(&its->cmd_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1741) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1742)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1743) #define REGISTER_ITS_DESC(off, rd, wr, length, acc) \
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1744) { \
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1745) .reg_offset = off, \
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1746) .len = length, \
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1747) .access_flags = acc, \
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1748) .its_read = rd, \
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1749) .its_write = wr, \
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1750) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1751)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1752) #define REGISTER_ITS_DESC_UACCESS(off, rd, wr, uwr, length, acc)\
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1753) { \
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1754) .reg_offset = off, \
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1755) .len = length, \
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1756) .access_flags = acc, \
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1757) .its_read = rd, \
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1758) .its_write = wr, \
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1759) .uaccess_its_write = uwr, \
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1760) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1761)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1762) static void its_mmio_write_wi(struct kvm *kvm, struct vgic_its *its,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1763) gpa_t addr, unsigned int len, unsigned long val)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1764) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1765) /* Ignore */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1766) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1767)
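/*
 * The ITS register frame, as dispatched by kvm_io_bus: each entry maps
 * an offset range inside the GITS region to its read/write handlers.
 * Registers that ignore guest writes (e.g. GITS_IIDR, GITS_CREADR) get
 * a dedicated uaccess write handler so userspace can still restore
 * their state.
 */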
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1768) static struct vgic_register_region its_registers[] = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1769) REGISTER_ITS_DESC(GITS_CTLR,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1770) vgic_mmio_read_its_ctlr, vgic_mmio_write_its_ctlr, 4,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1771) VGIC_ACCESS_32bit),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1772) REGISTER_ITS_DESC_UACCESS(GITS_IIDR,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1773) vgic_mmio_read_its_iidr, its_mmio_write_wi,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1774) vgic_mmio_uaccess_write_its_iidr, 4,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1775) VGIC_ACCESS_32bit),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1776) REGISTER_ITS_DESC(GITS_TYPER,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1777) vgic_mmio_read_its_typer, its_mmio_write_wi, 8,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1778) VGIC_ACCESS_64bit | VGIC_ACCESS_32bit),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1779) REGISTER_ITS_DESC(GITS_CBASER,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1780) vgic_mmio_read_its_cbaser, vgic_mmio_write_its_cbaser, 8,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1781) VGIC_ACCESS_64bit | VGIC_ACCESS_32bit),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1782) REGISTER_ITS_DESC(GITS_CWRITER,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1783) vgic_mmio_read_its_cwriter, vgic_mmio_write_its_cwriter, 8,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1784) VGIC_ACCESS_64bit | VGIC_ACCESS_32bit),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1785) REGISTER_ITS_DESC_UACCESS(GITS_CREADR,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1786) vgic_mmio_read_its_creadr, its_mmio_write_wi,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1787) vgic_mmio_uaccess_write_its_creadr, 8,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1788) VGIC_ACCESS_64bit | VGIC_ACCESS_32bit),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1789) REGISTER_ITS_DESC(GITS_BASER,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1790) vgic_mmio_read_its_baser, vgic_mmio_write_its_baser, 0x40,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1791) VGIC_ACCESS_64bit | VGIC_ACCESS_32bit),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1792) REGISTER_ITS_DESC(GITS_IDREGS_BASE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1793) vgic_mmio_read_its_idregs, its_mmio_write_wi, 0x30,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1794) VGIC_ACCESS_32bit),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1795) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1796)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1797) /* This is called on setting the LPI enable bit in the redistributor. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1798) void vgic_enable_lpis(struct kvm_vcpu *vcpu)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1799) {
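	/*
	 * GICR_PENDBASER.PTZ set means the guest guarantees the pending
	 * table is zeroed, so there is nothing to read back; otherwise
	 * seed our pending state from the table in guest memory.
	 */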
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1800) if (!(vcpu->arch.vgic_cpu.pendbaser & GICR_PENDBASER_PTZ))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1801) its_sync_lpi_pending_table(vcpu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1802) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1803)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1804) static int vgic_register_its_iodev(struct kvm *kvm, struct vgic_its *its,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1805) u64 addr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1806) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1807) struct vgic_io_device *iodev = &its->iodev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1808) int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1809)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1810) mutex_lock(&kvm->slots_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1811) if (!IS_VGIC_ADDR_UNDEF(its->vgic_its_base)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1812) ret = -EBUSY;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1813) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1814) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1815)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1816) its->vgic_its_base = addr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1817) iodev->regions = its_registers;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1818) iodev->nr_regions = ARRAY_SIZE(its_registers);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1819) kvm_iodevice_init(&iodev->dev, &kvm_io_gic_ops);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1820)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1821) iodev->base_addr = its->vgic_its_base;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1822) iodev->iodev_type = IODEV_ITS;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1823) iodev->its = its;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1824) ret = kvm_io_bus_register_dev(kvm, KVM_MMIO_BUS, iodev->base_addr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1825) KVM_VGIC_V3_ITS_SIZE, &iodev->dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1826) out:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1827) mutex_unlock(&kvm->slots_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1828)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1829) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1830) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1831)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1832) /* Default is 16 cached LPIs per vcpu */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1833) #define LPI_DEFAULT_PCPU_CACHE_SIZE 16
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1834)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1835) void vgic_lpi_translation_cache_init(struct kvm *kvm)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1836) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1837) struct vgic_dist *dist = &kvm->arch.vgic;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1838) unsigned int sz;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1839) int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1840)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1841) if (!list_empty(&dist->lpi_translation_cache))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1842) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1843)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1844) sz = atomic_read(&kvm->online_vcpus) * LPI_DEFAULT_PCPU_CACHE_SIZE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1845)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1846) for (i = 0; i < sz; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1847) struct vgic_translation_cache_entry *cte;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1848)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1849) /* An allocation failure is not fatal */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1850) cte = kzalloc(sizeof(*cte), GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1851) if (WARN_ON(!cte))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1852) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1853)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1854) INIT_LIST_HEAD(&cte->entry);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1855) list_add(&cte->entry, &dist->lpi_translation_cache);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1856) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1857) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1858)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1859) void vgic_lpi_translation_cache_destroy(struct kvm *kvm)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1860) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1861) struct vgic_dist *dist = &kvm->arch.vgic;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1862) struct vgic_translation_cache_entry *cte, *tmp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1863)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1864) vgic_its_invalidate_cache(kvm);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1865)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1866) list_for_each_entry_safe(cte, tmp,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1867) &dist->lpi_translation_cache, entry) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1868) list_del(&cte->entry);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1869) kfree(cte);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1870) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1871) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1872)
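/*
 * Reset-time attributes for the emulated ITS tables: normal
 * inner-shareable memory, inner read-allocate write-back cacheable,
 * and (for GITS_BASER) 64K pages. The guest may rewrite these fields,
 * subject to sanitisation.
 */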
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1873) #define INITIAL_BASER_VALUE \
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1874) (GIC_BASER_CACHEABILITY(GITS_BASER, INNER, RaWb) | \
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1875) GIC_BASER_CACHEABILITY(GITS_BASER, OUTER, SameAsInner) | \
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1876) GIC_BASER_SHAREABILITY(GITS_BASER, InnerShareable) | \
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1877) GITS_BASER_PAGE_SIZE_64K)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1878)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1879) #define INITIAL_PROPBASER_VALUE \
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1880) (GIC_BASER_CACHEABILITY(GICR_PROPBASER, INNER, RaWb) | \
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1881) GIC_BASER_CACHEABILITY(GICR_PROPBASER, OUTER, SameAsInner) | \
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1882) GIC_BASER_SHAREABILITY(GICR_PROPBASER, InnerShareable))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1883)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1884) static int vgic_its_create(struct kvm_device *dev, u32 type)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1885) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1886) struct vgic_its *its;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1887)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1888) if (type != KVM_DEV_TYPE_ARM_VGIC_ITS)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1889) return -ENODEV;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1890)
	its = kzalloc(sizeof(*its), GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1892) if (!its)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1893) return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1894)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1895) if (vgic_initialized(dev->kvm)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1896) int ret = vgic_v4_init(dev->kvm);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1897) if (ret < 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1898) kfree(its);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1899) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1900) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1901)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1902) vgic_lpi_translation_cache_init(dev->kvm);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1903) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1904)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1905) mutex_init(&its->its_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1906) mutex_init(&its->cmd_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1907)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1908) its->vgic_its_base = VGIC_ADDR_UNDEF;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1909)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1910) INIT_LIST_HEAD(&its->device_list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1911) INIT_LIST_HEAD(&its->collection_list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1912)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1913) dev->kvm->arch.vgic.msis_require_devid = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1914) dev->kvm->arch.vgic.has_its = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1915) its->enabled = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1916) its->dev = dev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1917)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1918) its->baser_device_table = INITIAL_BASER_VALUE |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1919) ((u64)GITS_BASER_TYPE_DEVICE << GITS_BASER_TYPE_SHIFT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1920) its->baser_coll_table = INITIAL_BASER_VALUE |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1921) ((u64)GITS_BASER_TYPE_COLLECTION << GITS_BASER_TYPE_SHIFT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1922) dev->kvm->arch.vgic.propbaser = INITIAL_PROPBASER_VALUE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1923)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1924) dev->private = its;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1925)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1926) return vgic_its_set_abi(its, NR_ITS_ABIS - 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1927) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1928)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1929) static void vgic_its_destroy(struct kvm_device *kvm_dev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1930) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1931) struct kvm *kvm = kvm_dev->kvm;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1932) struct vgic_its *its = kvm_dev->private;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1933)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1934) mutex_lock(&its->its_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1935)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1936) vgic_its_free_device_list(kvm, its);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1937) vgic_its_free_collection_list(kvm, its);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1938)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1939) mutex_unlock(&its->its_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1940) kfree(its);
	/* Allocated by kvm_ioctl_create_device(), freed by this .destroy hook */
	kfree(kvm_dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1942) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1943)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1944) static int vgic_its_has_attr_regs(struct kvm_device *dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1945) struct kvm_device_attr *attr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1946) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1947) const struct vgic_register_region *region;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1948) gpa_t offset = attr->attr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1949) int align;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1950)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1951) align = (offset < GITS_TYPER) || (offset >= GITS_PIDR4) ? 0x3 : 0x7;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1952)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1953) if (offset & align)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1954) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1955)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1956) region = vgic_find_mmio_region(its_registers,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1957) ARRAY_SIZE(its_registers),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1958) offset);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1959) if (!region)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1960) return -ENXIO;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1961)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1962) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1963) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1964)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1965) static int vgic_its_attr_regs_access(struct kvm_device *dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1966) struct kvm_device_attr *attr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1967) u64 *reg, bool is_write)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1968) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1969) const struct vgic_register_region *region;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1970) struct vgic_its *its;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1971) gpa_t addr, offset;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1972) unsigned int len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1973) int align, ret = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1974)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1975) its = dev->private;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1976) offset = attr->attr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1977)
	/*
	 * Although the spec supports upper/lower 32-bit accesses to
	 * 64-bit ITS registers, the userspace ABI requires 64-bit
	 * accesses to all 64-bit wide registers. We therefore only
	 * support 32-bit accesses to GITS_CTLR, GITS_IIDR and the GITS
	 * ID registers.
	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1985) if ((offset < GITS_TYPER) || (offset >= GITS_PIDR4))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1986) align = 0x3;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1987) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1988) align = 0x7;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1989)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1990) if (offset & align)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1991) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1992)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1993) mutex_lock(&dev->kvm->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1994)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1995) if (IS_VGIC_ADDR_UNDEF(its->vgic_its_base)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1996) ret = -ENXIO;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1997) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1998) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1999)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2000) region = vgic_find_mmio_region(its_registers,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2001) ARRAY_SIZE(its_registers),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2002) offset);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2003) if (!region) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2004) ret = -ENXIO;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2005) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2006) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2007)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2008) if (!lock_all_vcpus(dev->kvm)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2009) ret = -EBUSY;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2010) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2011) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2012)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2013) addr = its->vgic_its_base + offset;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2014)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2015) len = region->access_flags & VGIC_ACCESS_64bit ? 8 : 4;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2016)
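	/*
	 * Registers that ignore guest writes (GITS_IIDR, GITS_CREADR)
	 * provide a dedicated uaccess write handler so userspace can
	 * actually set their state; everything else shares the guest
	 * MMIO path.
	 */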
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2017) if (is_write) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2018) if (region->uaccess_its_write)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2019) ret = region->uaccess_its_write(dev->kvm, its, addr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2020) len, *reg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2021) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2022) region->its_write(dev->kvm, its, addr, len, *reg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2023) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2024) *reg = region->its_read(dev->kvm, its, addr, len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2025) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2026) unlock_all_vcpus(dev->kvm);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2027) out:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2028) mutex_unlock(&dev->kvm->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2029) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2030) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2031)
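/*
 * The save format stores, in each entry (DTE or ITE), the distance to
 * the next allocated ID rather than the ID itself, capped to the width
 * of the "next" field. For instance (hypothetical IDs), with devices
 * 0, 3 and 10 mapped, the DTE for device 0 stores a next offset of 3
 * and the one for device 3 stores 7; the last entry stores 0 to mark
 * the end of the table.
 */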
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2032) static u32 compute_next_devid_offset(struct list_head *h,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2033) struct its_device *dev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2034) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2035) struct its_device *next;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2036) u32 next_offset;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2037)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2038) if (list_is_last(&dev->dev_list, h))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2039) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2040) next = list_next_entry(dev, dev_list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2041) next_offset = next->device_id - dev->device_id;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2042)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2043) return min_t(u32, next_offset, VITS_DTE_MAX_DEVID_OFFSET);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2044) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2045)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2046) static u32 compute_next_eventid_offset(struct list_head *h, struct its_ite *ite)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2047) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2048) struct its_ite *next;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2049) u32 next_offset;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2050)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2051) if (list_is_last(&ite->ite_list, h))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2052) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2053) next = list_next_entry(ite, ite_list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2054) next_offset = next->event_id - ite->event_id;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2055)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2056) return min_t(u32, next_offset, VITS_ITE_MAX_EVENTID_OFFSET);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2057) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2058)
/**
 * entry_fn_t - Callback called on a table entry restore path
 * @its: its handle
 * @id: id of the entry
 * @entry: pointer to the entry
 * @opaque: pointer to opaque data
 *
 * Return: < 0 on error, 0 if the last element was identified, id offset to
 * the next element otherwise
 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2069) typedef int (*entry_fn_t)(struct vgic_its *its, u32 id, void *entry,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2070) void *opaque);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2071)
/**
 * scan_its_table - Scan a contiguous table in guest RAM and apply a function
 * to each entry
 *
 * @its: its handle
 * @base: base gpa of the table
 * @size: size of the table in bytes
 * @esz: entry size in bytes
 * @start_id: the ID of the first entry in the table
 * (non-zero for second level tables)
 * @fn: function to apply on each entry
 * @opaque: opaque data passed to @fn
 *
 * Return: < 0 on error, 0 if the last element was identified, 1 otherwise
 * (the last element may not be found on second level tables)
 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2087) static int scan_its_table(struct vgic_its *its, gpa_t base, int size, u32 esz,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2088) int start_id, entry_fn_t fn, void *opaque)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2089) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2090) struct kvm *kvm = its->dev->kvm;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2091) unsigned long len = size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2092) int id = start_id;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2093) gpa_t gpa = base;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2094) char entry[ESZ_MAX];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2095) int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2096)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2097) memset(entry, 0, esz);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2098)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2099) while (len > 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2100) int next_offset;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2101) size_t byte_offset;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2102)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2103) ret = kvm_read_guest_lock(kvm, gpa, entry, esz);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2104) if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2105) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2106)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2107) next_offset = fn(its, id, entry, opaque);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2108) if (next_offset <= 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2109) return next_offset;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2110)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2111) byte_offset = next_offset * esz;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2112) id += next_offset;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2113) gpa += byte_offset;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2114) len -= byte_offset;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2115) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2116) return 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2117) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2118)
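/*
 * ITE layout per the rev 0 save/restore ABI
 * (Documentation/virt/kvm/devices/arm-vgic-its.rst):
 *
 *  bits:   | 63 ... 48 | 47 ... 16 | 15 ... 0 |
 *  values: |   next    |  pINTID   |   ICID   |
 */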
/**
 * vgic_its_save_ite - Save an interrupt translation entry at @gpa
 *
 * @its: its handle
 * @dev: ITS device the ITE belongs to
 * @ite: the ITE to save
 * @gpa: GPA of the ITE slot in guest RAM
 * @ite_esz: ITE entry size in bytes
 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2122) static int vgic_its_save_ite(struct vgic_its *its, struct its_device *dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2123) struct its_ite *ite, gpa_t gpa, int ite_esz)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2124) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2125) struct kvm *kvm = its->dev->kvm;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2126) u32 next_offset;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2127) u64 val;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2128)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2129) next_offset = compute_next_eventid_offset(&dev->itt_head, ite);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2130) val = ((u64)next_offset << KVM_ITS_ITE_NEXT_SHIFT) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2131) ((u64)ite->irq->intid << KVM_ITS_ITE_PINTID_SHIFT) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2132) ite->collection->collection_id;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2133) val = cpu_to_le64(val);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2134) return kvm_write_guest_lock(kvm, gpa, &val, ite_esz);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2135) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2136)
/**
 * vgic_its_restore_ite - restore an interrupt translation entry
 *
 * @its: its handle
 * @event_id: id used for indexing
 * @ptr: pointer to the ITE entry
 * @opaque: pointer to the its_device
 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2143) static int vgic_its_restore_ite(struct vgic_its *its, u32 event_id,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2144) void *ptr, void *opaque)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2145) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2146) struct its_device *dev = (struct its_device *)opaque;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2147) struct its_collection *collection;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2148) struct kvm *kvm = its->dev->kvm;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2149) struct kvm_vcpu *vcpu = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2150) u64 val;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2151) u64 *p = (u64 *)ptr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2152) struct vgic_irq *irq;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2153) u32 coll_id, lpi_id;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2154) struct its_ite *ite;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2155) u32 offset;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2156)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2157) val = *p;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2158)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2159) val = le64_to_cpu(val);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2160)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2161) coll_id = val & KVM_ITS_ITE_ICID_MASK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2162) lpi_id = (val & KVM_ITS_ITE_PINTID_MASK) >> KVM_ITS_ITE_PINTID_SHIFT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2163)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2164) if (!lpi_id)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2165) return 1; /* invalid entry, no choice but to scan next entry */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2166)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2167) if (lpi_id < VGIC_MIN_LPI)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2168) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2169)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2170) offset = val >> KVM_ITS_ITE_NEXT_SHIFT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2171) if (event_id + offset >= BIT_ULL(dev->num_eventid_bits))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2172) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2173)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2174) collection = find_collection(its, coll_id);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2175) if (!collection)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2176) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2177)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2178) ite = vgic_its_alloc_ite(dev, collection, event_id);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2179) if (IS_ERR(ite))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2180) return PTR_ERR(ite);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2181)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2182) if (its_is_collection_mapped(collection))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2183) vcpu = kvm_get_vcpu(kvm, collection->target_addr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2184)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2185) irq = vgic_add_lpi(kvm, lpi_id, vcpu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2186) if (IS_ERR(irq))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2187) return PTR_ERR(irq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2188) ite->irq = irq;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2189)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2190) return offset;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2191) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2192)
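/*
 * Comparator for sorting ITEs by ascending event ID. Event IDs are
 * unique within an ITT, so the equal case cannot occur; list_sort()
 * only cares about the sign of the result.
 */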
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2193) static int vgic_its_ite_cmp(void *priv, struct list_head *a,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2194) struct list_head *b)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2195) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2196) struct its_ite *itea = container_of(a, struct its_ite, ite_list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2197) struct its_ite *iteb = container_of(b, struct its_ite, ite_list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2198)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2199) if (itea->event_id < iteb->event_id)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2200) return -1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2201) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2202) return 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2203) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2204)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2205) static int vgic_its_save_itt(struct vgic_its *its, struct its_device *device)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2206) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2207) const struct vgic_its_abi *abi = vgic_its_get_abi(its);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2208) gpa_t base = device->itt_addr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2209) struct its_ite *ite;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2210) int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2211) int ite_esz = abi->ite_esz;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2212)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2213) list_sort(NULL, &device->itt_head, vgic_its_ite_cmp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2214)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2215) list_for_each_entry(ite, &device->itt_head, ite_list) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2216) gpa_t gpa = base + ite->event_id * ite_esz;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2217)
		/*
		 * If an LPI has the HW bit set, this interrupt is
		 * controlled by GICv4 and we do not have direct access
		 * to its state. Simply fail the save operation in that
		 * case.
		 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2224) if (ite->irq->hw)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2225) return -EACCES;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2226)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2227) ret = vgic_its_save_ite(its, device, ite, gpa, ite_esz);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2228) if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2229) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2230) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2231) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2232) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2233)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2234) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2235) * vgic_its_restore_itt - restore the ITT of a device
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2236) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2237) * @its: its handle
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2238) * @dev: device handle
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2239) *
 * Return: 0 on success, < 0 on error
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2241) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2242) static int vgic_its_restore_itt(struct vgic_its *its, struct its_device *dev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2243) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2244) const struct vgic_its_abi *abi = vgic_its_get_abi(its);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2245) gpa_t base = dev->itt_addr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2246) int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2247) int ite_esz = abi->ite_esz;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2248) size_t max_size = BIT_ULL(dev->num_eventid_bits) * ite_esz;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2249)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2250) ret = scan_its_table(its, base, max_size, ite_esz, 0,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2251) vgic_its_restore_ite, dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2252)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2253) /* scan_its_table returns +1 if all ITEs are invalid */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2254) if (ret > 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2255) ret = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2256)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2257) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2258) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2259)
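/*
 * DTE layout per the rev 0 save/restore ABI:
 *
 *  bits:   | 63 | 62 ... 49 | 48 ... 5 | 4 ... 0 |
 *  values: | V  |   next    | ITT_addr |  Size   |
 *
 * with ITT_addr the ITT GPA shifted right by 8, and Size the number of
 * event ID bits minus one.
 */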
/**
 * vgic_its_save_dte - Save a device table entry at a given GPA
 *
 * @its: ITS handle
 * @dev: ITS device
 * @ptr: GPA where the DTE must be written
 * @dte_esz: DTE entry size in bytes
 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2267) static int vgic_its_save_dte(struct vgic_its *its, struct its_device *dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2268) gpa_t ptr, int dte_esz)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2269) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2270) struct kvm *kvm = its->dev->kvm;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2271) u64 val, itt_addr_field;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2272) u32 next_offset;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2273)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2274) itt_addr_field = dev->itt_addr >> 8;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2275) next_offset = compute_next_devid_offset(&its->device_list, dev);
	val = ((1ULL << KVM_ITS_DTE_VALID_SHIFT) |
	       ((u64)next_offset << KVM_ITS_DTE_NEXT_SHIFT) |
	       (itt_addr_field << KVM_ITS_DTE_ITTADDR_SHIFT) |
	       (dev->num_eventid_bits - 1));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2280) val = cpu_to_le64(val);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2281) return kvm_write_guest_lock(kvm, ptr, &val, dte_esz);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2282) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2283)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2284) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2285) * vgic_its_restore_dte - restore a device table entry
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2286) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2287) * @its: its handle
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2288) * @id: device id the DTE corresponds to
 * @ptr: kernel VA where the 8-byte DTE is located
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2290) * @opaque: unused
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2291) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2292) * Return: < 0 on error, 0 if the dte is the last one, id offset to the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2293) * next dte otherwise
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2294) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2295) static int vgic_its_restore_dte(struct vgic_its *its, u32 id,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2296) void *ptr, void *opaque)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2297) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2298) struct its_device *dev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2299) gpa_t itt_addr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2300) u8 num_eventid_bits;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2301) u64 entry = *(u64 *)ptr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2302) bool valid;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2303) u32 offset;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2304) int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2305)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2306) entry = le64_to_cpu(entry);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2307)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2308) valid = entry >> KVM_ITS_DTE_VALID_SHIFT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2309) num_eventid_bits = (entry & KVM_ITS_DTE_SIZE_MASK) + 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2310) itt_addr = ((entry & KVM_ITS_DTE_ITTADDR_MASK)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2311) >> KVM_ITS_DTE_ITTADDR_SHIFT) << 8;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2312)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2313) if (!valid)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2314) return 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2315)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2316) /* dte entry is valid */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2317) offset = (entry & KVM_ITS_DTE_NEXT_MASK) >> KVM_ITS_DTE_NEXT_SHIFT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2318)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2319) dev = vgic_its_alloc_device(its, id, itt_addr, num_eventid_bits);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2320) if (IS_ERR(dev))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2321) return PTR_ERR(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2322)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2323) ret = vgic_its_restore_itt(its, dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2324) if (ret) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2325) vgic_its_free_device(its->dev->kvm, dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2326) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2327) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2328)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2329) return offset;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2330) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2331)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2332) static int vgic_its_device_cmp(void *priv, struct list_head *a,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2333) struct list_head *b)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2334) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2335) struct its_device *deva = container_of(a, struct its_device, dev_list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2336) struct its_device *devb = container_of(b, struct its_device, dev_list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2337)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2338) if (deva->device_id < devb->device_id)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2339) return -1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2340) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2341) return 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2342) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2343)
/**
 * vgic_its_save_device_tables - Save the device table and all ITTs
 * into guest RAM
 *
 * @its: its handle
 *
 * L1/L2 handling is hidden by the vgic_its_check_id() helper, which
 * directly computes the GPA of the device table entry.
 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2351) static int vgic_its_save_device_tables(struct vgic_its *its)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2352) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2353) const struct vgic_its_abi *abi = vgic_its_get_abi(its);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2354) u64 baser = its->baser_device_table;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2355) struct its_device *dev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2356) int dte_esz = abi->dte_esz;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2357)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2358) if (!(baser & GITS_BASER_VALID))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2359) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2360)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2361) list_sort(NULL, &its->device_list, vgic_its_device_cmp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2362)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2363) list_for_each_entry(dev, &its->device_list, dev_list) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2364) int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2365) gpa_t eaddr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2366)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2367) if (!vgic_its_check_id(its, baser,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2368) dev->device_id, &eaddr))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2369) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2370)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2371) ret = vgic_its_save_itt(its, dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2372) if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2373) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2374)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2375) ret = vgic_its_save_dte(its, dev, eaddr, dte_esz);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2376) if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2377) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2378) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2379) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2380) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2381)
/**
 * handle_l1_dte - callback used for L1 device table entries (two-level case)
 *
 * @its: its handle
 * @id: index of the entry in the L1 table
 * @addr: kernel VA of the L1 entry
 * @opaque: unused
 *
 * L1 table entries are scanned one entry at a time.
 * Return: < 0 on error, 0 if the last dte was found when scanning the L2
 * table, +1 otherwise (meaning the next L1 entry must be scanned)
 */
static int handle_l1_dte(struct vgic_its *its, u32 id, void *addr,
			 void *opaque)
{
	const struct vgic_its_abi *abi = vgic_its_get_abi(its);
	int l2_start_id = id * (SZ_64K / abi->dte_esz);
	u64 entry = *(u64 *)addr;
	int dte_esz = abi->dte_esz;
	gpa_t gpa;
	int ret;

	entry = le64_to_cpu(entry);

	if (!(entry & KVM_ITS_L1E_VALID_MASK))
		return 1;

	gpa = entry & KVM_ITS_L1E_ADDR_MASK;

	ret = scan_its_table(its, gpa, SZ_64K, dte_esz,
			     l2_start_id, vgic_its_restore_dte, NULL);

	return ret;
}
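
/*
 * Illustrative sketch (not used by the driver): with the two-level
 * layout that handle_l1_dte() walks, each valid L1 entry covers one
 * 64K L2 page holding SZ_64K / dte_esz DTEs, so a given device ID
 * lands in the L1 slot computed below.
 */
static inline u32 vgic_its_example_l1_index(u32 device_id, int dte_esz)
{
	return device_id / (SZ_64K / dte_esz);
}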

/**
 * vgic_its_restore_device_tables - Restore the device table and all ITTs
 * from guest RAM to internal data structs
 * @its: its handle
 */
static int vgic_its_restore_device_tables(struct vgic_its *its)
{
	const struct vgic_its_abi *abi = vgic_its_get_abi(its);
	u64 baser = its->baser_device_table;
	int l1_esz, ret;
	int l1_tbl_size = GITS_BASER_NR_PAGES(baser) * SZ_64K;
	gpa_t l1_gpa;

	if (!(baser & GITS_BASER_VALID))
		return 0;

	l1_gpa = GITS_BASER_ADDR_48_to_52(baser);

	if (baser & GITS_BASER_INDIRECT) {
		l1_esz = GITS_LVL1_ENTRY_SIZE;
		ret = scan_its_table(its, l1_gpa, l1_tbl_size, l1_esz, 0,
				     handle_l1_dte, NULL);
	} else {
		l1_esz = abi->dte_esz;
		ret = scan_its_table(its, l1_gpa, l1_tbl_size, l1_esz, 0,
				     vgic_its_restore_dte, NULL);
	}

	/* scan_its_table returns +1 if all entries are invalid */
	if (ret > 0)
		ret = 0;

	return ret;
}

static int vgic_its_save_cte(struct vgic_its *its,
			     struct its_collection *collection,
			     gpa_t gpa, int esz)
{
	u64 val;

	val = (1ULL << KVM_ITS_CTE_VALID_SHIFT |
	       ((u64)collection->target_addr << KVM_ITS_CTE_RDBASE_SHIFT) |
	       collection->collection_id);
	val = cpu_to_le64(val);
	return kvm_write_guest_lock(its->dev->kvm, gpa, &val, esz);
}
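
/*
 * For reference, the CTE encoding used above and decoded in
 * vgic_its_restore_cte() below: the valid flag lives at
 * KVM_ITS_CTE_VALID_SHIFT, the target redistributor (stored here as a
 * vcpu index) at KVM_ITS_CTE_RDBASE_SHIFT, and the low bits hold the
 * collection ID (KVM_ITS_CTE_ICID_MASK).
 */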

static int vgic_its_restore_cte(struct vgic_its *its, gpa_t gpa, int esz)
{
	struct its_collection *collection;
	struct kvm *kvm = its->dev->kvm;
	u32 target_addr, coll_id;
	u64 val;
	int ret;

	BUG_ON(esz > sizeof(val));
	ret = kvm_read_guest_lock(kvm, gpa, &val, esz);
	if (ret)
		return ret;
	val = le64_to_cpu(val);
	if (!(val & KVM_ITS_CTE_VALID_MASK))
		return 0;

	target_addr = (u32)(val >> KVM_ITS_CTE_RDBASE_SHIFT);
	coll_id = val & KVM_ITS_CTE_ICID_MASK;

	if (target_addr != COLLECTION_NOT_MAPPED &&
	    target_addr >= atomic_read(&kvm->online_vcpus))
		return -EINVAL;

	collection = find_collection(its, coll_id);
	if (collection)
		return -EEXIST;
	ret = vgic_its_alloc_collection(its, &collection, coll_id);
	if (ret)
		return ret;
	collection->target_addr = target_addr;
	return 1;
}

/**
 * vgic_its_save_collection_table - Save the collection table into
 * guest RAM
 * @its: its handle
 */
static int vgic_its_save_collection_table(struct vgic_its *its)
{
	const struct vgic_its_abi *abi = vgic_its_get_abi(its);
	u64 baser = its->baser_coll_table;
	gpa_t gpa = GITS_BASER_ADDR_48_to_52(baser);
	struct its_collection *collection;
	u64 val;
	size_t max_size, filled = 0;
	int ret, cte_esz = abi->cte_esz;

	if (!(baser & GITS_BASER_VALID))
		return 0;

	max_size = GITS_BASER_NR_PAGES(baser) * SZ_64K;

	list_for_each_entry(collection, &its->collection_list, coll_list) {
		ret = vgic_its_save_cte(its, collection, gpa, cte_esz);
		if (ret)
			return ret;
		gpa += cte_esz;
		filled += cte_esz;
	}

	if (filled == max_size)
		return 0;

	/*
	 * table is not fully filled, add a last dummy element
	 * with valid bit unset
	 */
	val = 0;
	BUG_ON(cte_esz > sizeof(val));
	ret = kvm_write_guest_lock(its->dev->kvm, gpa, &val, cte_esz);
	return ret;
}

/**
 * vgic_its_restore_collection_table - reads the collection table
 * in guest memory and restores the ITS internal state. Requires the
 * BASER registers to have been restored beforehand.
 * @its: its handle
 */
static int vgic_its_restore_collection_table(struct vgic_its *its)
{
	const struct vgic_its_abi *abi = vgic_its_get_abi(its);
	u64 baser = its->baser_coll_table;
	int cte_esz = abi->cte_esz;
	size_t max_size, read = 0;
	gpa_t gpa;
	int ret;

	if (!(baser & GITS_BASER_VALID))
		return 0;

	gpa = GITS_BASER_ADDR_48_to_52(baser);

	max_size = GITS_BASER_NR_PAGES(baser) * SZ_64K;

	while (read < max_size) {
		ret = vgic_its_restore_cte(its, gpa, cte_esz);
		if (ret <= 0)
			break;
		gpa += cte_esz;
		read += cte_esz;
	}

	if (ret > 0)
		return 0;

	return ret;
}

/**
 * vgic_its_save_tables_v0 - Save the ITS tables into guest RAM
 * according to v0 ABI
 * @its: its handle
 */
static int vgic_its_save_tables_v0(struct vgic_its *its)
{
	int ret;

	ret = vgic_its_save_device_tables(its);
	if (ret)
		return ret;

	return vgic_its_save_collection_table(its);
}

/**
 * vgic_its_restore_tables_v0 - Restore the ITS tables from guest RAM
 * to internal data structs according to v0 ABI
 * @its: its handle
 */
static int vgic_its_restore_tables_v0(struct vgic_its *its)
{
	int ret;

	ret = vgic_its_restore_collection_table(its);
	if (ret)
		return ret;

	return vgic_its_restore_device_tables(its);
}

static int vgic_its_commit_v0(struct vgic_its *its)
{
	const struct vgic_its_abi *abi;

	abi = vgic_its_get_abi(its);
	its->baser_coll_table &= ~GITS_BASER_ENTRY_SIZE_MASK;
	its->baser_device_table &= ~GITS_BASER_ENTRY_SIZE_MASK;

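	/*
	 * Note: the ENTRY_SIZE field of GITS_BASER encodes "bytes per
	 * entry minus one", which is what GIC_ENCODE_SZ() yields; e.g.
	 * an 8-byte CTE would be encoded as 7 in the 5-bit field.
	 */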
	its->baser_coll_table |= (GIC_ENCODE_SZ(abi->cte_esz, 5)
					<< GITS_BASER_ENTRY_SIZE_SHIFT);

	its->baser_device_table |= (GIC_ENCODE_SZ(abi->dte_esz, 5)
					<< GITS_BASER_ENTRY_SIZE_SHIFT);
	return 0;
}

static void vgic_its_reset(struct kvm *kvm, struct vgic_its *its)
{
	/* We need to keep the ABI specific field values */
	its->baser_coll_table &= ~GITS_BASER_VALID;
	its->baser_device_table &= ~GITS_BASER_VALID;
	its->cbaser = 0;
	its->creadr = 0;
	its->cwriter = 0;
	its->enabled = 0;
	vgic_its_free_device_list(kvm, its);
	vgic_its_free_collection_list(kvm, its);
}

static int vgic_its_has_attr(struct kvm_device *dev,
			     struct kvm_device_attr *attr)
{
	switch (attr->group) {
	case KVM_DEV_ARM_VGIC_GRP_ADDR:
		switch (attr->attr) {
		case KVM_VGIC_ITS_ADDR_TYPE:
			return 0;
		}
		break;
	case KVM_DEV_ARM_VGIC_GRP_CTRL:
		switch (attr->attr) {
		case KVM_DEV_ARM_VGIC_CTRL_INIT:
			return 0;
		case KVM_DEV_ARM_ITS_CTRL_RESET:
			return 0;
		case KVM_DEV_ARM_ITS_SAVE_TABLES:
			return 0;
		case KVM_DEV_ARM_ITS_RESTORE_TABLES:
			return 0;
		}
		break;
	case KVM_DEV_ARM_VGIC_GRP_ITS_REGS:
		return vgic_its_has_attr_regs(dev, attr);
	}
	return -ENXIO;
}

static int vgic_its_ctrl(struct kvm *kvm, struct vgic_its *its, u64 attr)
{
	const struct vgic_its_abi *abi = vgic_its_get_abi(its);
	int ret = 0;

	if (attr == KVM_DEV_ARM_VGIC_CTRL_INIT) /* Nothing to do */
		return 0;

	mutex_lock(&kvm->lock);
	mutex_lock(&its->its_lock);

	if (!lock_all_vcpus(kvm)) {
		mutex_unlock(&its->its_lock);
		mutex_unlock(&kvm->lock);
		return -EBUSY;
	}

	switch (attr) {
	case KVM_DEV_ARM_ITS_CTRL_RESET:
		vgic_its_reset(kvm, its);
		break;
	case KVM_DEV_ARM_ITS_SAVE_TABLES:
		ret = abi->save_tables(its);
		break;
	case KVM_DEV_ARM_ITS_RESTORE_TABLES:
		ret = abi->restore_tables(its);
		break;
	}

	unlock_all_vcpus(kvm);
	mutex_unlock(&its->its_lock);
	mutex_unlock(&kvm->lock);
	return ret;
}
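
/*
 * Userspace-side sketch (illustrative only, not part of this file's
 * code) of driving the path above through KVM_SET_DEVICE_ATTR, where
 * 'its_fd' is assumed to come from a prior KVM_CREATE_DEVICE call:
 *
 *	struct kvm_device_attr attr = {
 *		.group	= KVM_DEV_ARM_VGIC_GRP_CTRL,
 *		.attr	= KVM_DEV_ARM_ITS_SAVE_TABLES,
 *	};
 *
 *	ret = ioctl(its_fd, KVM_SET_DEVICE_ATTR, &attr);
 */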

static int vgic_its_set_attr(struct kvm_device *dev,
			     struct kvm_device_attr *attr)
{
	struct vgic_its *its = dev->private;
	int ret;

	switch (attr->group) {
	case KVM_DEV_ARM_VGIC_GRP_ADDR: {
		u64 __user *uaddr = (u64 __user *)(long)attr->addr;
		unsigned long type = (unsigned long)attr->attr;
		u64 addr;

		if (type != KVM_VGIC_ITS_ADDR_TYPE)
			return -ENODEV;

		if (copy_from_user(&addr, uaddr, sizeof(addr)))
			return -EFAULT;

		ret = vgic_check_ioaddr(dev->kvm, &its->vgic_its_base,
					addr, SZ_64K);
		if (ret)
			return ret;

		return vgic_register_its_iodev(dev->kvm, its, addr);
	}
	case KVM_DEV_ARM_VGIC_GRP_CTRL:
		return vgic_its_ctrl(dev->kvm, its, attr->attr);
	case KVM_DEV_ARM_VGIC_GRP_ITS_REGS: {
		u64 __user *uaddr = (u64 __user *)(long)attr->addr;
		u64 reg;

		if (get_user(reg, uaddr))
			return -EFAULT;

		return vgic_its_attr_regs_access(dev, attr, &reg, true);
	}
	}
	return -ENXIO;
}
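
/*
 * Matching userspace-side sketch (illustrative only) for the
 * KVM_DEV_ARM_VGIC_GRP_ADDR case above; attr.addr carries a pointer to
 * a 64K aligned guest physical address, which the copy_from_user()
 * above reads (0x8080000 is just an assumed example GPA):
 *
 *	__u64 base = 0x8080000;
 *	struct kvm_device_attr attr = {
 *		.group	= KVM_DEV_ARM_VGIC_GRP_ADDR,
 *		.attr	= KVM_VGIC_ITS_ADDR_TYPE,
 *		.addr	= (__u64)&base,
 *	};
 *
 *	ret = ioctl(its_fd, KVM_SET_DEVICE_ATTR, &attr);
 */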

static int vgic_its_get_attr(struct kvm_device *dev,
			     struct kvm_device_attr *attr)
{
	switch (attr->group) {
	case KVM_DEV_ARM_VGIC_GRP_ADDR: {
		struct vgic_its *its = dev->private;
		u64 addr = its->vgic_its_base;
		u64 __user *uaddr = (u64 __user *)(long)attr->addr;
		unsigned long type = (unsigned long)attr->attr;

		if (type != KVM_VGIC_ITS_ADDR_TYPE)
			return -ENODEV;

		if (copy_to_user(uaddr, &addr, sizeof(addr)))
			return -EFAULT;
		break;
	}
	case KVM_DEV_ARM_VGIC_GRP_ITS_REGS: {
		u64 __user *uaddr = (u64 __user *)(long)attr->addr;
		u64 reg;
		int ret;

		ret = vgic_its_attr_regs_access(dev, attr, &reg, false);
		if (ret)
			return ret;
		return put_user(reg, uaddr);
	}
	default:
		return -ENXIO;
	}

	return 0;
}

static struct kvm_device_ops kvm_arm_vgic_its_ops = {
	.name = "kvm-arm-vgic-its",
	.create = vgic_its_create,
	.destroy = vgic_its_destroy,
	.set_attr = vgic_its_set_attr,
	.get_attr = vgic_its_get_attr,
	.has_attr = vgic_its_has_attr,
};

int kvm_vgic_register_its_device(void)
{
	return kvm_register_device_ops(&kvm_arm_vgic_its_ops,
				       KVM_DEV_TYPE_ARM_VGIC_ITS);
}
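
/*
 * Userspace-side sketch (illustrative only) of instantiating the
 * device once these ops are registered, where 'vm_fd' is an assumed
 * VM file descriptor; on success cd.fd is the ITS device fd used with
 * the KVM_SET_DEVICE_ATTR examples above:
 *
 *	struct kvm_create_device cd = {
 *		.type = KVM_DEV_TYPE_ARM_VGIC_ITS,
 *	};
 *
 *	ret = ioctl(vm_fd, KVM_CREATE_DEVICE, &cd);
 */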