^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1) // SPDX-License-Identifier: GPL-2.0-only
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3) * VGIC: KVM DEVICE API
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5) * Copyright (C) 2015 ARM Ltd.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6) * Author: Marc Zyngier <marc.zyngier@arm.com>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8) #include <linux/kvm_host.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9) #include <kvm/arm_vgic.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10) #include <linux/uaccess.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11) #include <asm/kvm_mmu.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12) #include <asm/cputype.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13) #include "vgic.h"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15) /* common helpers */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 16)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 17) int vgic_check_ioaddr(struct kvm *kvm, phys_addr_t *ioaddr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 18) phys_addr_t addr, phys_addr_t alignment)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 19) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 20) if (addr & ~kvm_phys_mask(kvm))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 21) return -E2BIG;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 22)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 23) if (!IS_ALIGNED(addr, alignment))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 24) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 25)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 26) if (!IS_VGIC_ADDR_UNDEF(*ioaddr))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 27) return -EEXIST;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 28)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 29) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 30) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 31)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 32) static int vgic_check_type(struct kvm *kvm, int type_needed)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 33) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 34) if (kvm->arch.vgic.vgic_model != type_needed)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 35) return -ENODEV;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 36) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 37) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 38) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 39)
/**
 * kvm_vgic_addr - set or get vgic VM base addresses
 * @kvm: pointer to the vm struct
 * @type: the VGIC addr type, one of KVM_VGIC_V[23]_ADDR_TYPE_XXX
 * @addr: pointer to address value
 * @write: if true set the address in the VM address space, if false read the
 *          address
 *
 * Set or get the vgic base addresses for the distributor and the virtual CPU
 * interface in the VM physical address space. These addresses are properties
 * of the emulated core/SoC and therefore user space initially knows this
 * information.
 * Check them for sanity (alignment, double assignment). We can't check for
 * overlapping regions in case of a virtual GICv3 here, since we don't know
 * the number of VCPUs yet, so we defer this check to map_resources().
 *
 * Returns 0 on success, -ENODEV for an unknown @type or a model mismatch,
 * or a negative error from the per-type validation.
 */
int kvm_vgic_addr(struct kvm *kvm, unsigned long type, u64 *addr, bool write)
{
	int r = 0;
	struct vgic_dist *vgic = &kvm->arch.vgic;
	phys_addr_t *addr_ptr, alignment;
	/* Value returned by reads when no redistributor region exists yet. */
	u64 undef_value = VGIC_ADDR_UNDEF;

	mutex_lock(&kvm->lock);
	switch (type) {
	case KVM_VGIC_V2_ADDR_TYPE_DIST:
		r = vgic_check_type(kvm, KVM_DEV_TYPE_ARM_VGIC_V2);
		addr_ptr = &vgic->vgic_dist_base;
		alignment = SZ_4K;
		break;
	case KVM_VGIC_V2_ADDR_TYPE_CPU:
		r = vgic_check_type(kvm, KVM_DEV_TYPE_ARM_VGIC_V2);
		addr_ptr = &vgic->vgic_cpu_base;
		alignment = SZ_4K;
		break;
	case KVM_VGIC_V3_ADDR_TYPE_DIST:
		r = vgic_check_type(kvm, KVM_DEV_TYPE_ARM_VGIC_V3);
		addr_ptr = &vgic->vgic_dist_base;
		alignment = SZ_64K;
		break;
	case KVM_VGIC_V3_ADDR_TYPE_REDIST: {
		struct vgic_redist_region *rdreg;

		r = vgic_check_type(kvm, KVM_DEV_TYPE_ARM_VGIC_V3);
		if (r)
			break;
		if (write) {
			/* Legacy API: a single region at index 0, count 0. */
			r = vgic_v3_set_redist_base(kvm, 0, *addr, 0);
			goto out;
		}
		/* Reads report the base of the first registered region. */
		rdreg = list_first_entry_or_null(&vgic->rd_regions,
						 struct vgic_redist_region, list);
		if (!rdreg)
			addr_ptr = &undef_value;
		else
			addr_ptr = &rdreg->base;
		break;
	}
	case KVM_VGIC_V3_ADDR_TYPE_REDIST_REGION:
	{
		struct vgic_redist_region *rdreg;
		u8 index;

		r = vgic_check_type(kvm, KVM_DEV_TYPE_ARM_VGIC_V3);
		if (r)
			break;

		/* *addr packs index, flags, count and base into one u64. */
		index = *addr & KVM_VGIC_V3_RDIST_INDEX_MASK;

		if (write) {
			gpa_t base = *addr & KVM_VGIC_V3_RDIST_BASE_MASK;
			u32 count = (*addr & KVM_VGIC_V3_RDIST_COUNT_MASK)
					>> KVM_VGIC_V3_RDIST_COUNT_SHIFT;
			u8 flags = (*addr & KVM_VGIC_V3_RDIST_FLAGS_MASK)
					>> KVM_VGIC_V3_RDIST_FLAGS_SHIFT;

			/* count == 0 is reserved to the legacy API above;
			 * no flag bits are defined yet. */
			if (!count || flags)
				r = -EINVAL;
			else
				r = vgic_v3_set_redist_base(kvm, index,
							    base, count);
			goto out;
		}

		rdreg = vgic_v3_rdist_region_from_index(kvm, index);
		if (!rdreg) {
			r = -ENOENT;
			goto out;
		}

		/* Re-pack the region description for the caller. */
		*addr = index;
		*addr |= rdreg->base;
		*addr |= (u64)rdreg->count << KVM_VGIC_V3_RDIST_COUNT_SHIFT;
		goto out;
	}
	default:
		r = -ENODEV;
	}

	if (r)
		goto out;

	if (write) {
		r = vgic_check_ioaddr(kvm, addr_ptr, *addr, alignment);
		if (!r)
			*addr_ptr = *addr;
	} else {
		*addr = *addr_ptr;
	}

out:
	mutex_unlock(&kvm->lock);
	return r;
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 154)
/*
 * Handle the set-attribute groups shared by GICv2 and GICv3: base
 * addresses, the number of IRQs, and the INIT control.
 *
 * Returns -ENXIO for any group/attr not handled here, so the model
 * specific callers can fall through to their own handling.
 */
static int vgic_set_common_attr(struct kvm_device *dev,
				struct kvm_device_attr *attr)
{
	int r;

	switch (attr->group) {
	case KVM_DEV_ARM_VGIC_GRP_ADDR: {
		u64 __user *uaddr = (u64 __user *)(long)attr->addr;
		u64 addr;
		unsigned long type = (unsigned long)attr->attr;

		if (copy_from_user(&addr, uaddr, sizeof(addr)))
			return -EFAULT;

		r = kvm_vgic_addr(dev->kvm, type, &addr, true);
		/* -ENODEV here means "not this vgic model": map to -ENXIO. */
		return (r == -ENODEV) ? -ENXIO : r;
	}
	case KVM_DEV_ARM_VGIC_GRP_NR_IRQS: {
		u32 __user *uaddr = (u32 __user *)(long)attr->addr;
		u32 val;
		int ret = 0;

		if (get_user(val, uaddr))
			return -EFAULT;

		/*
		 * We require:
		 * - at least 32 SPIs on top of the 16 SGIs and 16 PPIs
		 * - at most 1024 interrupts
		 * - a multiple of 32 interrupts
		 */
		if (val < (VGIC_NR_PRIVATE_IRQS + 32) ||
		    val > VGIC_MAX_RESERVED ||
		    (val & 31))
			return -EINVAL;

		mutex_lock(&dev->kvm->lock);

		/* nr_spis can only be set once, before the vgic is ready. */
		if (vgic_ready(dev->kvm) || dev->kvm->arch.vgic.nr_spis)
			ret = -EBUSY;
		else
			dev->kvm->arch.vgic.nr_spis =
				val - VGIC_NR_PRIVATE_IRQS;

		mutex_unlock(&dev->kvm->lock);

		return ret;
	}
	case KVM_DEV_ARM_VGIC_GRP_CTRL: {
		switch (attr->attr) {
		case KVM_DEV_ARM_VGIC_CTRL_INIT:
			mutex_lock(&dev->kvm->lock);
			r = vgic_init(dev->kvm);
			mutex_unlock(&dev->kvm->lock);
			return r;
		}
		break;
	}
	}

	return -ENXIO;
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 217)
/*
 * Handle the get-attribute groups shared by GICv2 and GICv3: base
 * addresses and the number of IRQs.
 *
 * Returns -ENXIO for any group not handled here, so the model specific
 * callers can fall through to their own handling.
 */
static int vgic_get_common_attr(struct kvm_device *dev,
				struct kvm_device_attr *attr)
{
	int r = -ENXIO;

	switch (attr->group) {
	case KVM_DEV_ARM_VGIC_GRP_ADDR: {
		u64 __user *uaddr = (u64 __user *)(long)attr->addr;
		u64 addr;
		unsigned long type = (unsigned long)attr->attr;

		/*
		 * The userspace value is read even for a "get": for the
		 * REDIST_REGION type it carries the region index to query.
		 */
		if (copy_from_user(&addr, uaddr, sizeof(addr)))
			return -EFAULT;

		r = kvm_vgic_addr(dev->kvm, type, &addr, false);
		if (r)
			return (r == -ENODEV) ? -ENXIO : r;

		if (copy_to_user(uaddr, &addr, sizeof(addr)))
			return -EFAULT;
		break;
	}
	case KVM_DEV_ARM_VGIC_GRP_NR_IRQS: {
		u32 __user *uaddr = (u32 __user *)(long)attr->addr;

		/* Report the total IRQ count, private IRQs included. */
		r = put_user(dev->kvm->arch.vgic.nr_spis +
			     VGIC_NR_PRIVATE_IRQS, uaddr);
		break;
	}
	}

	return r;
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 251)
/* kvm_device_ops ->create: forward to the common vgic creation code. */
static int vgic_create(struct kvm_device *dev, u32 type)
{
	return kvm_vgic_create(dev->kvm, type);
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 256)
/*
 * kvm_device_ops ->destroy: only the device wrapper is freed here; the
 * vgic state itself is torn down along with the VM.
 */
static void vgic_destroy(struct kvm_device *dev)
{
	kfree(dev);
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 261)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 262) int kvm_register_vgic_device(unsigned long type)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 263) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 264) int ret = -ENODEV;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 265)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 266) switch (type) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 267) case KVM_DEV_TYPE_ARM_VGIC_V2:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 268) ret = kvm_register_device_ops(&kvm_arm_vgic_v2_ops,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 269) KVM_DEV_TYPE_ARM_VGIC_V2);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 270) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 271) case KVM_DEV_TYPE_ARM_VGIC_V3:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 272) ret = kvm_register_device_ops(&kvm_arm_vgic_v3_ops,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 273) KVM_DEV_TYPE_ARM_VGIC_V3);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 274)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 275) if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 276) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 277) ret = kvm_vgic_register_its_device();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 278) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 279) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 280)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 281) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 282) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 283)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 284) int vgic_v2_parse_attr(struct kvm_device *dev, struct kvm_device_attr *attr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 285) struct vgic_reg_attr *reg_attr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 286) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 287) int cpuid;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 288)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 289) cpuid = (attr->attr & KVM_DEV_ARM_VGIC_CPUID_MASK) >>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 290) KVM_DEV_ARM_VGIC_CPUID_SHIFT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 291)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 292) if (cpuid >= atomic_read(&dev->kvm->online_vcpus))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 293) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 294)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 295) reg_attr->vcpu = kvm_get_vcpu(dev->kvm, cpuid);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 296) reg_attr->addr = attr->attr & KVM_DEV_ARM_VGIC_OFFSET_MASK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 297)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 298) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 299) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 300)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 301) /* unlocks vcpus from @vcpu_lock_idx and smaller */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 302) static void unlock_vcpus(struct kvm *kvm, int vcpu_lock_idx)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 303) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 304) struct kvm_vcpu *tmp_vcpu;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 305)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 306) for (; vcpu_lock_idx >= 0; vcpu_lock_idx--) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 307) tmp_vcpu = kvm_get_vcpu(kvm, vcpu_lock_idx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 308) mutex_unlock(&tmp_vcpu->mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 309) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 310) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 311)
/* Drop the vcpu mutexes taken by a successful lock_all_vcpus(). */
void unlock_all_vcpus(struct kvm *kvm)
{
	unlock_vcpus(kvm, atomic_read(&kvm->online_vcpus) - 1);
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 316)
/* Returns true if all vcpus were locked, false otherwise */
bool lock_all_vcpus(struct kvm *kvm)
{
	struct kvm_vcpu *tmp_vcpu;
	int c;

	/*
	 * Any time a vcpu is run, vcpu_load is called which tries to grab the
	 * vcpu->mutex. By grabbing the vcpu->mutex of all VCPUs we ensure
	 * that no other VCPUs are run and fiddle with the vgic state while we
	 * access it.
	 */
	kvm_for_each_vcpu(c, tmp_vcpu, kvm) {
		if (!mutex_trylock(&tmp_vcpu->mutex)) {
			/*
			 * trylock failed: roll back the mutexes taken so far
			 * (indices 0..c-1) rather than blocking here.
			 */
			unlock_vcpus(kvm, c - 1);
			return false;
		}
	}

	return true;
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 338)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 339) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 340) * vgic_v2_attr_regs_access - allows user space to access VGIC v2 state
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 341) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 342) * @dev: kvm device handle
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 343) * @attr: kvm device attribute
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 344) * @reg: address the value is read or written
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 345) * @is_write: true if userspace is writing a register
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 346) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 347) static int vgic_v2_attr_regs_access(struct kvm_device *dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 348) struct kvm_device_attr *attr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 349) u32 *reg, bool is_write)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 350) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 351) struct vgic_reg_attr reg_attr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 352) gpa_t addr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 353) struct kvm_vcpu *vcpu;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 354) int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 355)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 356) ret = vgic_v2_parse_attr(dev, attr, ®_attr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 357) if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 358) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 359)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 360) vcpu = reg_attr.vcpu;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 361) addr = reg_attr.addr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 362)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 363) mutex_lock(&dev->kvm->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 364)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 365) ret = vgic_init(dev->kvm);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 366) if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 367) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 368)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 369) if (!lock_all_vcpus(dev->kvm)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 370) ret = -EBUSY;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 371) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 372) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 373)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 374) switch (attr->group) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 375) case KVM_DEV_ARM_VGIC_GRP_CPU_REGS:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 376) ret = vgic_v2_cpuif_uaccess(vcpu, is_write, addr, reg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 377) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 378) case KVM_DEV_ARM_VGIC_GRP_DIST_REGS:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 379) ret = vgic_v2_dist_uaccess(vcpu, is_write, addr, reg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 380) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 381) default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 382) ret = -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 383) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 384) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 385)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 386) unlock_all_vcpus(dev->kvm);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 387) out:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 388) mutex_unlock(&dev->kvm->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 389) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 390) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 391)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 392) static int vgic_v2_set_attr(struct kvm_device *dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 393) struct kvm_device_attr *attr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 394) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 395) int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 396)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 397) ret = vgic_set_common_attr(dev, attr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 398) if (ret != -ENXIO)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 399) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 400)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 401) switch (attr->group) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 402) case KVM_DEV_ARM_VGIC_GRP_DIST_REGS:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 403) case KVM_DEV_ARM_VGIC_GRP_CPU_REGS: {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 404) u32 __user *uaddr = (u32 __user *)(long)attr->addr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 405) u32 reg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 406)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 407) if (get_user(reg, uaddr))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 408) return -EFAULT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 409)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 410) return vgic_v2_attr_regs_access(dev, attr, ®, true);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 411) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 412) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 413)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 414) return -ENXIO;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 415) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 416)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 417) static int vgic_v2_get_attr(struct kvm_device *dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 418) struct kvm_device_attr *attr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 419) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 420) int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 421)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 422) ret = vgic_get_common_attr(dev, attr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 423) if (ret != -ENXIO)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 424) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 425)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 426) switch (attr->group) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 427) case KVM_DEV_ARM_VGIC_GRP_DIST_REGS:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 428) case KVM_DEV_ARM_VGIC_GRP_CPU_REGS: {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 429) u32 __user *uaddr = (u32 __user *)(long)attr->addr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 430) u32 reg = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 431)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 432) ret = vgic_v2_attr_regs_access(dev, attr, ®, false);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 433) if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 434) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 435) return put_user(reg, uaddr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 436) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 437) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 438)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 439) return -ENXIO;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 440) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 441)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 442) static int vgic_v2_has_attr(struct kvm_device *dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 443) struct kvm_device_attr *attr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 444) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 445) switch (attr->group) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 446) case KVM_DEV_ARM_VGIC_GRP_ADDR:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 447) switch (attr->attr) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 448) case KVM_VGIC_V2_ADDR_TYPE_DIST:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 449) case KVM_VGIC_V2_ADDR_TYPE_CPU:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 450) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 451) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 452) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 453) case KVM_DEV_ARM_VGIC_GRP_DIST_REGS:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 454) case KVM_DEV_ARM_VGIC_GRP_CPU_REGS:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 455) return vgic_v2_has_attr_regs(dev, attr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 456) case KVM_DEV_ARM_VGIC_GRP_NR_IRQS:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 457) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 458) case KVM_DEV_ARM_VGIC_GRP_CTRL:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 459) switch (attr->attr) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 460) case KVM_DEV_ARM_VGIC_CTRL_INIT:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 461) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 462) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 463) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 464) return -ENXIO;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 465) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 466)
/* KVM device ops table backing the GICv2 userspace device. */
struct kvm_device_ops kvm_arm_vgic_v2_ops = {
	.name = "kvm-arm-vgic-v2",
	.create = vgic_create,
	.destroy = vgic_destroy,
	.set_attr = vgic_v2_set_attr,
	.get_attr = vgic_v2_get_attr,
	.has_attr = vgic_v2_has_attr,
};
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 475)
/*
 * Decode a GICv3 device attribute into a target vcpu (looked up by the
 * MPIDR encoded in the attribute) and a register offset.  Returns
 * -EINVAL when no vcpu matches the MPIDR.
 */
int vgic_v3_parse_attr(struct kvm_device *dev, struct kvm_device_attr *attr,
		       struct vgic_reg_attr *reg_attr)
{
	unsigned long vgic_mpidr, mpidr_reg;

	/*
	 * For KVM_DEV_ARM_VGIC_GRP_DIST_REGS group,
	 * attr might not hold MPIDR. Hence assume vcpu0.
	 */
	if (attr->group != KVM_DEV_ARM_VGIC_GRP_DIST_REGS) {
		vgic_mpidr = (attr->attr & KVM_DEV_ARM_VGIC_V3_MPIDR_MASK) >>
			      KVM_DEV_ARM_VGIC_V3_MPIDR_SHIFT;

		/* Re-expand the packed affinity fields into MPIDR layout. */
		mpidr_reg = VGIC_TO_MPIDR(vgic_mpidr);
		reg_attr->vcpu = kvm_mpidr_to_vcpu(dev->kvm, mpidr_reg);
	} else {
		reg_attr->vcpu = kvm_get_vcpu(dev->kvm, 0);
	}

	if (!reg_attr->vcpu)
		return -EINVAL;

	reg_attr->addr = attr->attr & KVM_DEV_ARM_VGIC_OFFSET_MASK;

	return 0;
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 502)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 503) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 504) * vgic_v3_attr_regs_access - allows user space to access VGIC v3 state
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 505) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 506) * @dev: kvm device handle
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 507) * @attr: kvm device attribute
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 508) * @reg: address the value is read or written
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 509) * @is_write: true if userspace is writing a register
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 510) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 511) static int vgic_v3_attr_regs_access(struct kvm_device *dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 512) struct kvm_device_attr *attr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 513) u64 *reg, bool is_write)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 514) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 515) struct vgic_reg_attr reg_attr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 516) gpa_t addr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 517) struct kvm_vcpu *vcpu;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 518) int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 519) u32 tmp32;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 520)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 521) ret = vgic_v3_parse_attr(dev, attr, ®_attr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 522) if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 523) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 524)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 525) vcpu = reg_attr.vcpu;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 526) addr = reg_attr.addr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 527)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 528) mutex_lock(&dev->kvm->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 529)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 530) if (unlikely(!vgic_initialized(dev->kvm))) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 531) ret = -EBUSY;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 532) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 533) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 534)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 535) if (!lock_all_vcpus(dev->kvm)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 536) ret = -EBUSY;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 537) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 538) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 539)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 540) switch (attr->group) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 541) case KVM_DEV_ARM_VGIC_GRP_DIST_REGS:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 542) if (is_write)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 543) tmp32 = *reg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 544)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 545) ret = vgic_v3_dist_uaccess(vcpu, is_write, addr, &tmp32);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 546) if (!is_write)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 547) *reg = tmp32;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 548) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 549) case KVM_DEV_ARM_VGIC_GRP_REDIST_REGS:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 550) if (is_write)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 551) tmp32 = *reg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 552)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 553) ret = vgic_v3_redist_uaccess(vcpu, is_write, addr, &tmp32);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 554) if (!is_write)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 555) *reg = tmp32;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 556) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 557) case KVM_DEV_ARM_VGIC_GRP_CPU_SYSREGS: {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 558) u64 regid;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 559)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 560) regid = (attr->attr & KVM_DEV_ARM_VGIC_SYSREG_INSTR_MASK);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 561) ret = vgic_v3_cpu_sysregs_uaccess(vcpu, is_write,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 562) regid, reg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 563) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 564) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 565) case KVM_DEV_ARM_VGIC_GRP_LEVEL_INFO: {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 566) unsigned int info, intid;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 567)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 568) info = (attr->attr & KVM_DEV_ARM_VGIC_LINE_LEVEL_INFO_MASK) >>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 569) KVM_DEV_ARM_VGIC_LINE_LEVEL_INFO_SHIFT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 570) if (info == VGIC_LEVEL_INFO_LINE_LEVEL) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 571) intid = attr->attr &
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 572) KVM_DEV_ARM_VGIC_LINE_LEVEL_INTID_MASK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 573) ret = vgic_v3_line_level_info_uaccess(vcpu, is_write,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 574) intid, reg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 575) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 576) ret = -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 577) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 578) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 579) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 580) default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 581) ret = -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 582) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 583) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 584)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 585) unlock_all_vcpus(dev->kvm);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 586) out:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 587) mutex_unlock(&dev->kvm->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 588) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 589) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 590)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 591) static int vgic_v3_set_attr(struct kvm_device *dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 592) struct kvm_device_attr *attr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 593) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 594) int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 595)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 596) ret = vgic_set_common_attr(dev, attr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 597) if (ret != -ENXIO)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 598) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 599)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 600) switch (attr->group) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 601) case KVM_DEV_ARM_VGIC_GRP_DIST_REGS:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 602) case KVM_DEV_ARM_VGIC_GRP_REDIST_REGS: {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 603) u32 __user *uaddr = (u32 __user *)(long)attr->addr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 604) u32 tmp32;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 605) u64 reg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 606)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 607) if (get_user(tmp32, uaddr))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 608) return -EFAULT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 609)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 610) reg = tmp32;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 611) return vgic_v3_attr_regs_access(dev, attr, ®, true);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 612) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 613) case KVM_DEV_ARM_VGIC_GRP_CPU_SYSREGS: {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 614) u64 __user *uaddr = (u64 __user *)(long)attr->addr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 615) u64 reg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 616)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 617) if (get_user(reg, uaddr))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 618) return -EFAULT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 619)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 620) return vgic_v3_attr_regs_access(dev, attr, ®, true);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 621) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 622) case KVM_DEV_ARM_VGIC_GRP_LEVEL_INFO: {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 623) u32 __user *uaddr = (u32 __user *)(long)attr->addr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 624) u64 reg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 625) u32 tmp32;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 626)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 627) if (get_user(tmp32, uaddr))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 628) return -EFAULT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 629)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 630) reg = tmp32;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 631) return vgic_v3_attr_regs_access(dev, attr, ®, true);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 632) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 633) case KVM_DEV_ARM_VGIC_GRP_CTRL: {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 634) int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 635)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 636) switch (attr->attr) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 637) case KVM_DEV_ARM_VGIC_SAVE_PENDING_TABLES:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 638) mutex_lock(&dev->kvm->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 639)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 640) if (!lock_all_vcpus(dev->kvm)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 641) mutex_unlock(&dev->kvm->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 642) return -EBUSY;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 643) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 644) ret = vgic_v3_save_pending_tables(dev->kvm);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 645) unlock_all_vcpus(dev->kvm);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 646) mutex_unlock(&dev->kvm->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 647) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 648) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 649) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 650) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 651) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 652) return -ENXIO;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 653) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 654)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 655) static int vgic_v3_get_attr(struct kvm_device *dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 656) struct kvm_device_attr *attr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 657) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 658) int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 659)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 660) ret = vgic_get_common_attr(dev, attr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 661) if (ret != -ENXIO)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 662) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 663)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 664) switch (attr->group) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 665) case KVM_DEV_ARM_VGIC_GRP_DIST_REGS:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 666) case KVM_DEV_ARM_VGIC_GRP_REDIST_REGS: {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 667) u32 __user *uaddr = (u32 __user *)(long)attr->addr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 668) u64 reg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 669) u32 tmp32;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 670)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 671) ret = vgic_v3_attr_regs_access(dev, attr, ®, false);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 672) if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 673) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 674) tmp32 = reg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 675) return put_user(tmp32, uaddr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 676) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 677) case KVM_DEV_ARM_VGIC_GRP_CPU_SYSREGS: {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 678) u64 __user *uaddr = (u64 __user *)(long)attr->addr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 679) u64 reg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 680)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 681) ret = vgic_v3_attr_regs_access(dev, attr, ®, false);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 682) if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 683) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 684) return put_user(reg, uaddr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 685) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 686) case KVM_DEV_ARM_VGIC_GRP_LEVEL_INFO: {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 687) u32 __user *uaddr = (u32 __user *)(long)attr->addr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 688) u64 reg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 689) u32 tmp32;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 690)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 691) ret = vgic_v3_attr_regs_access(dev, attr, ®, false);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 692) if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 693) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 694) tmp32 = reg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 695) return put_user(tmp32, uaddr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 696) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 697) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 698) return -ENXIO;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 699) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 700)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 701) static int vgic_v3_has_attr(struct kvm_device *dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 702) struct kvm_device_attr *attr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 703) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 704) switch (attr->group) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 705) case KVM_DEV_ARM_VGIC_GRP_ADDR:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 706) switch (attr->attr) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 707) case KVM_VGIC_V3_ADDR_TYPE_DIST:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 708) case KVM_VGIC_V3_ADDR_TYPE_REDIST:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 709) case KVM_VGIC_V3_ADDR_TYPE_REDIST_REGION:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 710) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 711) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 712) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 713) case KVM_DEV_ARM_VGIC_GRP_DIST_REGS:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 714) case KVM_DEV_ARM_VGIC_GRP_REDIST_REGS:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 715) case KVM_DEV_ARM_VGIC_GRP_CPU_SYSREGS:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 716) return vgic_v3_has_attr_regs(dev, attr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 717) case KVM_DEV_ARM_VGIC_GRP_NR_IRQS:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 718) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 719) case KVM_DEV_ARM_VGIC_GRP_LEVEL_INFO: {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 720) if (((attr->attr & KVM_DEV_ARM_VGIC_LINE_LEVEL_INFO_MASK) >>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 721) KVM_DEV_ARM_VGIC_LINE_LEVEL_INFO_SHIFT) ==
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 722) VGIC_LEVEL_INFO_LINE_LEVEL)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 723) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 724) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 725) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 726) case KVM_DEV_ARM_VGIC_GRP_CTRL:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 727) switch (attr->attr) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 728) case KVM_DEV_ARM_VGIC_CTRL_INIT:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 729) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 730) case KVM_DEV_ARM_VGIC_SAVE_PENDING_TABLES:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 731) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 732) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 733) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 734) return -ENXIO;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 735) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 736)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 737) struct kvm_device_ops kvm_arm_vgic_v3_ops = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 738) .name = "kvm-arm-vgic-v3",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 739) .create = vgic_create,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 740) .destroy = vgic_destroy,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 741) .set_attr = vgic_v3_set_attr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 742) .get_attr = vgic_v3_get_attr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 743) .has_attr = vgic_v3_has_attr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 744) };