Orange Pi 5 kernel

Deprecated Linux kernel 5.10.110 for the Orange Pi 5/5B/5+ boards.

// SPDX-License-Identifier: GPL-2.0-only
/*
 * kvm eventfd support - use eventfd objects to signal various KVM events
 *
 * Copyright 2009 Novell.  All Rights Reserved.
 * Copyright 2010 Red Hat, Inc. and/or its affiliates.
 *
 * Author:
 *	Gregory Haskins <ghaskins@novell.com>
 */

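/*
 * Overview: this file implements both eventfd-based signalling paths of
 * KVM.  An irqfd forwards a signal on a host eventfd into the guest as an
 * interrupt on a given GSI; an ioeventfd goes the other way, turning a
 * guest PIO/MMIO write into an eventfd signal that userspace (or vhost)
 * can consume without a synchronous exit to userspace.
 */
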
#include <linux/kvm_host.h>
#include <linux/kvm.h>
#include <linux/kvm_irqfd.h>
#include <linux/workqueue.h>
#include <linux/syscalls.h>
#include <linux/wait.h>
#include <linux/poll.h>
#include <linux/file.h>
#include <linux/list.h>
#include <linux/eventfd.h>
#include <linux/kernel.h>
#include <linux/srcu.h>
#include <linux/slab.h>
#include <linux/seqlock.h>
#include <linux/irqbypass.h>
#include <trace/events/kvm.h>

#include <kvm/iodev.h>

#ifdef CONFIG_HAVE_KVM_IRQFD

static struct workqueue_struct *irqfd_cleanup_wq;

bool __attribute__((weak))
kvm_arch_irqfd_allowed(struct kvm *kvm, struct kvm_irqfd *args)
{
	return true;
}

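/*
 * Work-queue handler for the injection side.  A plain irqfd emulates an
 * edge-triggered interrupt by asserting and immediately de-asserting the
 * GSI; a resampling irqfd only asserts the line and leaves the de-assert
 * to irqfd_resampler_ack() once the guest acknowledges the interrupt.
 */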
static void
irqfd_inject(struct work_struct *work)
{
	struct kvm_kernel_irqfd *irqfd =
		container_of(work, struct kvm_kernel_irqfd, inject);
	struct kvm *kvm = irqfd->kvm;

	if (!irqfd->resampler) {
		kvm_set_irq(kvm, KVM_USERSPACE_IRQ_SOURCE_ID, irqfd->gsi, 1,
				false);
		kvm_set_irq(kvm, KVM_USERSPACE_IRQ_SOURCE_ID, irqfd->gsi, 0,
				false);
	} else
		kvm_set_irq(kvm, KVM_IRQFD_RESAMPLE_IRQ_SOURCE_ID,
			    irqfd->gsi, 1, false);
}

/*
 * Since resampler irqfds share an IRQ source ID, we de-assert once
 * then notify all of the resampler irqfds using this GSI.  We can't
 * do multiple de-asserts or we risk racing with incoming re-asserts.
 */
static void
irqfd_resampler_ack(struct kvm_irq_ack_notifier *kian)
{
	struct kvm_kernel_irqfd_resampler *resampler;
	struct kvm *kvm;
	struct kvm_kernel_irqfd *irqfd;
	int idx;

	resampler = container_of(kian,
			struct kvm_kernel_irqfd_resampler, notifier);
	kvm = resampler->kvm;

	kvm_set_irq(kvm, KVM_IRQFD_RESAMPLE_IRQ_SOURCE_ID,
		    resampler->notifier.gsi, 0, false);

	idx = srcu_read_lock(&kvm->irq_srcu);

	list_for_each_entry_rcu(irqfd, &resampler->list, resampler_link)
		eventfd_signal(irqfd->resamplefd, 1);

	srcu_read_unlock(&kvm->irq_srcu, idx);
}

static void
irqfd_resampler_shutdown(struct kvm_kernel_irqfd *irqfd)
{
	struct kvm_kernel_irqfd_resampler *resampler = irqfd->resampler;
	struct kvm *kvm = resampler->kvm;

	mutex_lock(&kvm->irqfds.resampler_lock);

	list_del_rcu(&irqfd->resampler_link);
	synchronize_srcu(&kvm->irq_srcu);

	if (list_empty(&resampler->list)) {
		list_del(&resampler->link);
		kvm_unregister_irq_ack_notifier(kvm, &resampler->notifier);
		kvm_set_irq(kvm, KVM_IRQFD_RESAMPLE_IRQ_SOURCE_ID,
			    resampler->notifier.gsi, 0, false);
		kfree(resampler);
	}

	mutex_unlock(&kvm->irqfds.resampler_lock);
}

/*
 * Race-free decouple logic (ordering is critical): unhook from the
 * eventfd wait-queue first so no new events can arrive, then flush any
 * injection still in flight, and only then drop the resampler and the
 * eventfd references.
 */
static void
irqfd_shutdown(struct work_struct *work)
{
	struct kvm_kernel_irqfd *irqfd =
		container_of(work, struct kvm_kernel_irqfd, shutdown);
	struct kvm *kvm = irqfd->kvm;
	u64 cnt;

	/* Make sure irqfd has been initialized in assign path. */
	synchronize_srcu(&kvm->irq_srcu);

	/*
	 * Synchronize with the wait-queue and unhook ourselves to prevent
	 * further events.
	 */
	eventfd_ctx_remove_wait_queue(irqfd->eventfd, &irqfd->wait, &cnt);

	/*
	 * We know no new events will be scheduled at this point, so block
	 * until all previously outstanding events have completed
	 */
	flush_work(&irqfd->inject);

	if (irqfd->resampler) {
		irqfd_resampler_shutdown(irqfd);
		eventfd_ctx_put(irqfd->resamplefd);
	}

	/*
	 * It is now safe to release the object's resources
	 */
#ifdef CONFIG_HAVE_KVM_IRQ_BYPASS
	irq_bypass_unregister_consumer(&irqfd->consumer);
#endif
	eventfd_ctx_put(irqfd->eventfd);
	kfree(irqfd);
}


/* assumes kvm->irqfds.lock is held */
static bool
irqfd_is_active(struct kvm_kernel_irqfd *irqfd)
{
	return !list_empty(&irqfd->list);
}

/*
 * Mark the irqfd as inactive and schedule it for removal
 *
 * assumes kvm->irqfds.lock is held
 */
static void
irqfd_deactivate(struct kvm_kernel_irqfd *irqfd)
{
	BUG_ON(!irqfd_is_active(irqfd));

	list_del_init(&irqfd->list);

	queue_work(irqfd_cleanup_wq, &irqfd->shutdown);
}

int __attribute__((weak)) kvm_arch_set_irq_inatomic(
				struct kvm_kernel_irq_routing_entry *irq,
				struct kvm *kvm, int irq_source_id,
				int level,
				bool line_status)
{
	return -EWOULDBLOCK;
}

/*
 * Called with wqh->lock held and interrupts disabled.  Injection is first
 * attempted in place via kvm_arch_set_irq_inatomic(); if it cannot be done
 * from atomic context (-EWOULDBLOCK), it is deferred to the irqfd->inject
 * work item.
 */
static int
irqfd_wakeup(wait_queue_entry_t *wait, unsigned mode, int sync, void *key)
{
	struct kvm_kernel_irqfd *irqfd =
		container_of(wait, struct kvm_kernel_irqfd, wait);
	__poll_t flags = key_to_poll(key);
	struct kvm_kernel_irq_routing_entry irq;
	struct kvm *kvm = irqfd->kvm;
	unsigned seq;
	int idx;

	if (flags & EPOLLIN) {
		idx = srcu_read_lock(&kvm->irq_srcu);
		do {
			seq = read_seqcount_begin(&irqfd->irq_entry_sc);
			irq = irqfd->irq_entry;
		} while (read_seqcount_retry(&irqfd->irq_entry_sc, seq));
		/* An event has been signaled, inject an interrupt */
		if (kvm_arch_set_irq_inatomic(&irq, kvm,
					      KVM_USERSPACE_IRQ_SOURCE_ID, 1,
					      false) == -EWOULDBLOCK)
			schedule_work(&irqfd->inject);
		srcu_read_unlock(&kvm->irq_srcu, idx);
	}

	if (flags & EPOLLHUP) {
		/* The eventfd is closing, detach from KVM */
		unsigned long iflags;

		spin_lock_irqsave(&kvm->irqfds.lock, iflags);

		/*
		 * We must check if someone deactivated the irqfd before
		 * we could acquire the irqfds.lock since the item is
		 * deactivated from the KVM side before it is unhooked from
		 * the wait-queue.  If it is already deactivated, we can
		 * simply return knowing the other side will cleanup for us.
		 * We cannot race against the irqfd going away since the
		 * other side is required to acquire wqh->lock, which we hold
		 */
		if (irqfd_is_active(irqfd))
			irqfd_deactivate(irqfd);

		spin_unlock_irqrestore(&kvm->irqfds.lock, iflags);
	}

	return 0;
}

static void
irqfd_ptable_queue_proc(struct file *file, wait_queue_head_t *wqh,
			poll_table *pt)
{
	struct kvm_kernel_irqfd *irqfd =
		container_of(pt, struct kvm_kernel_irqfd, pt);
	add_wait_queue(wqh, &irqfd->wait);
}

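/*
 * Cache the routing entry for irqfd->gsi under the seqcount so that
 * irqfd_wakeup() can read it without taking irqfds.lock.  When the GSI
 * maps to anything other than exactly one entry, an empty entry
 * (type == 0) is cached instead, which makes kvm_arch_set_irq_inatomic()
 * bail with -EWOULDBLOCK and forces injection through the work-queue path.
 */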
/* Must be called under irqfds.lock */
static void irqfd_update(struct kvm *kvm, struct kvm_kernel_irqfd *irqfd)
{
	struct kvm_kernel_irq_routing_entry *e;
	struct kvm_kernel_irq_routing_entry entries[KVM_NR_IRQCHIPS];
	int n_entries;

	n_entries = kvm_irq_map_gsi(kvm, entries, irqfd->gsi);

	write_seqcount_begin(&irqfd->irq_entry_sc);

	e = entries;
	if (n_entries == 1)
		irqfd->irq_entry = *e;
	else
		irqfd->irq_entry.type = 0;

	write_seqcount_end(&irqfd->irq_entry_sc);
}

#ifdef CONFIG_HAVE_KVM_IRQ_BYPASS
void __attribute__((weak)) kvm_arch_irq_bypass_stop(
				struct irq_bypass_consumer *cons)
{
}

void __attribute__((weak)) kvm_arch_irq_bypass_start(
				struct irq_bypass_consumer *cons)
{
}

int __attribute__((weak)) kvm_arch_update_irqfd_routing(
				struct kvm *kvm, unsigned int host_irq,
				uint32_t guest_irq, bool set)
{
	return 0;
}
#endif

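/*
 * Assign path: resolve the eventfd (and, for KVM_IRQFD_FLAG_RESAMPLE, the
 * resample eventfd plus its per-GSI resampler), reject an eventfd that is
 * already bound to another irqfd, cache the routing entry, hook
 * irqfd_wakeup() into the eventfd's wait-queue via vfs_poll(), and catch
 * any event that was already pending before registration.
 */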
static int
kvm_irqfd_assign(struct kvm *kvm, struct kvm_irqfd *args)
{
	struct kvm_kernel_irqfd *irqfd, *tmp;
	struct fd f;
	struct eventfd_ctx *eventfd = NULL, *resamplefd = NULL;
	int ret;
	__poll_t events;
	int idx;

	if (!kvm_arch_intc_initialized(kvm))
		return -EAGAIN;

	if (!kvm_arch_irqfd_allowed(kvm, args))
		return -EINVAL;

	irqfd = kzalloc(sizeof(*irqfd), GFP_KERNEL_ACCOUNT);
	if (!irqfd)
		return -ENOMEM;

	irqfd->kvm = kvm;
	irqfd->gsi = args->gsi;
	INIT_LIST_HEAD(&irqfd->list);
	INIT_WORK(&irqfd->inject, irqfd_inject);
	INIT_WORK(&irqfd->shutdown, irqfd_shutdown);
	seqcount_spinlock_init(&irqfd->irq_entry_sc, &kvm->irqfds.lock);

	f = fdget(args->fd);
	if (!f.file) {
		ret = -EBADF;
		goto out;
	}

	eventfd = eventfd_ctx_fileget(f.file);
	if (IS_ERR(eventfd)) {
		ret = PTR_ERR(eventfd);
		goto fail;
	}

	irqfd->eventfd = eventfd;

	if (args->flags & KVM_IRQFD_FLAG_RESAMPLE) {
		struct kvm_kernel_irqfd_resampler *resampler;

		resamplefd = eventfd_ctx_fdget(args->resamplefd);
		if (IS_ERR(resamplefd)) {
			ret = PTR_ERR(resamplefd);
			goto fail;
		}

		irqfd->resamplefd = resamplefd;
		INIT_LIST_HEAD(&irqfd->resampler_link);

		mutex_lock(&kvm->irqfds.resampler_lock);

		list_for_each_entry(resampler,
				    &kvm->irqfds.resampler_list, link) {
			if (resampler->notifier.gsi == irqfd->gsi) {
				irqfd->resampler = resampler;
				break;
			}
		}

		if (!irqfd->resampler) {
			resampler = kzalloc(sizeof(*resampler),
					    GFP_KERNEL_ACCOUNT);
			if (!resampler) {
				ret = -ENOMEM;
				mutex_unlock(&kvm->irqfds.resampler_lock);
				goto fail;
			}

			resampler->kvm = kvm;
			INIT_LIST_HEAD(&resampler->list);
			resampler->notifier.gsi = irqfd->gsi;
			resampler->notifier.irq_acked = irqfd_resampler_ack;
			INIT_LIST_HEAD(&resampler->link);

			list_add(&resampler->link, &kvm->irqfds.resampler_list);
			kvm_register_irq_ack_notifier(kvm,
						      &resampler->notifier);
			irqfd->resampler = resampler;
		}

		list_add_rcu(&irqfd->resampler_link, &irqfd->resampler->list);
		synchronize_srcu(&kvm->irq_srcu);

		mutex_unlock(&kvm->irqfds.resampler_lock);
	}

	/*
	 * Install our own custom wake-up handling so we are notified via
	 * a callback whenever someone signals the underlying eventfd
	 */
	init_waitqueue_func_entry(&irqfd->wait, irqfd_wakeup);
	init_poll_funcptr(&irqfd->pt, irqfd_ptable_queue_proc);

	spin_lock_irq(&kvm->irqfds.lock);

	ret = 0;
	list_for_each_entry(tmp, &kvm->irqfds.items, list) {
		if (irqfd->eventfd != tmp->eventfd)
			continue;
		/* This fd is used for another irq already. */
		ret = -EBUSY;
		spin_unlock_irq(&kvm->irqfds.lock);
		goto fail;
	}

	idx = srcu_read_lock(&kvm->irq_srcu);
	irqfd_update(kvm, irqfd);

	list_add_tail(&irqfd->list, &kvm->irqfds.items);

	spin_unlock_irq(&kvm->irqfds.lock);

	/*
	 * Check if there was an event already pending on the eventfd
	 * before we registered, and trigger it as if we didn't miss it.
	 */
	events = vfs_poll(f.file, &irqfd->pt);

	if (events & EPOLLIN)
		schedule_work(&irqfd->inject);

#ifdef CONFIG_HAVE_KVM_IRQ_BYPASS
	if (kvm_arch_has_irq_bypass()) {
		irqfd->consumer.token = (void *)irqfd->eventfd;
		irqfd->consumer.add_producer = kvm_arch_irq_bypass_add_producer;
		irqfd->consumer.del_producer = kvm_arch_irq_bypass_del_producer;
		irqfd->consumer.stop = kvm_arch_irq_bypass_stop;
		irqfd->consumer.start = kvm_arch_irq_bypass_start;
		ret = irq_bypass_register_consumer(&irqfd->consumer);
		if (ret)
			pr_info("irq bypass consumer (token %p) registration fails: %d\n",
				irqfd->consumer.token, ret);
	}
#endif

	srcu_read_unlock(&kvm->irq_srcu, idx);

	/*
	 * do not drop the file until the irqfd is fully initialized, otherwise
	 * we might race against the EPOLLHUP
	 */
	fdput(f);
	return 0;

fail:
	if (irqfd->resampler)
		irqfd_resampler_shutdown(irqfd);

	if (resamplefd && !IS_ERR(resamplefd))
		eventfd_ctx_put(resamplefd);

	if (eventfd && !IS_ERR(eventfd))
		eventfd_ctx_put(eventfd);

	fdput(f);

out:
	kfree(irqfd);
	return ret;
}

bool kvm_irq_has_notifier(struct kvm *kvm, unsigned irqchip, unsigned pin)
{
	struct kvm_irq_ack_notifier *kian;
	int gsi, idx;

	idx = srcu_read_lock(&kvm->irq_srcu);
	gsi = kvm_irq_map_chip_pin(kvm, irqchip, pin);
	if (gsi != -1)
		hlist_for_each_entry_srcu(kian, &kvm->irq_ack_notifier_list,
					  link, srcu_read_lock_held(&kvm->irq_srcu))
			if (kian->gsi == gsi) {
				srcu_read_unlock(&kvm->irq_srcu, idx);
				return true;
			}

	srcu_read_unlock(&kvm->irq_srcu, idx);

	return false;
}
EXPORT_SYMBOL_GPL(kvm_irq_has_notifier);

void kvm_notify_acked_gsi(struct kvm *kvm, int gsi)
{
	struct kvm_irq_ack_notifier *kian;

	hlist_for_each_entry_srcu(kian, &kvm->irq_ack_notifier_list,
				  link, srcu_read_lock_held(&kvm->irq_srcu))
		if (kian->gsi == gsi)
			kian->irq_acked(kian);
}

void kvm_notify_acked_irq(struct kvm *kvm, unsigned irqchip, unsigned pin)
{
	int gsi, idx;

	trace_kvm_ack_irq(irqchip, pin);

	idx = srcu_read_lock(&kvm->irq_srcu);
	gsi = kvm_irq_map_chip_pin(kvm, irqchip, pin);
	if (gsi != -1)
		kvm_notify_acked_gsi(kvm, gsi);
	srcu_read_unlock(&kvm->irq_srcu, idx);
}

void kvm_register_irq_ack_notifier(struct kvm *kvm,
				   struct kvm_irq_ack_notifier *kian)
{
	mutex_lock(&kvm->irq_lock);
	hlist_add_head_rcu(&kian->link, &kvm->irq_ack_notifier_list);
	mutex_unlock(&kvm->irq_lock);
	kvm_arch_post_irq_ack_notifier_list_update(kvm);
}

void kvm_unregister_irq_ack_notifier(struct kvm *kvm,
				    struct kvm_irq_ack_notifier *kian)
{
	mutex_lock(&kvm->irq_lock);
	hlist_del_init_rcu(&kian->link);
	mutex_unlock(&kvm->irq_lock);
	synchronize_srcu(&kvm->irq_srcu);
	kvm_arch_post_irq_ack_notifier_list_update(kvm);
}
#endif

void
kvm_eventfd_init(struct kvm *kvm)
{
#ifdef CONFIG_HAVE_KVM_IRQFD
	spin_lock_init(&kvm->irqfds.lock);
	INIT_LIST_HEAD(&kvm->irqfds.items);
	INIT_LIST_HEAD(&kvm->irqfds.resampler_list);
	mutex_init(&kvm->irqfds.resampler_lock);
#endif
	INIT_LIST_HEAD(&kvm->ioeventfds);
}

#ifdef CONFIG_HAVE_KVM_IRQFD
/*
 * Shut down any irqfds that match fd+gsi
 */
static int
kvm_irqfd_deassign(struct kvm *kvm, struct kvm_irqfd *args)
{
	struct kvm_kernel_irqfd *irqfd, *tmp;
	struct eventfd_ctx *eventfd;

	eventfd = eventfd_ctx_fdget(args->fd);
	if (IS_ERR(eventfd))
		return PTR_ERR(eventfd);

	spin_lock_irq(&kvm->irqfds.lock);

	list_for_each_entry_safe(irqfd, tmp, &kvm->irqfds.items, list) {
		if (irqfd->eventfd == eventfd && irqfd->gsi == args->gsi) {
			/*
			 * This clearing of irq_entry.type is needed for when
			 * another thread calls kvm_irq_routing_update before
			 * we flush workqueue below (we synchronize with
			 * kvm_irq_routing_update using irqfds.lock).
			 */
			write_seqcount_begin(&irqfd->irq_entry_sc);
			irqfd->irq_entry.type = 0;
			write_seqcount_end(&irqfd->irq_entry_sc);
			irqfd_deactivate(irqfd);
		}
	}

	spin_unlock_irq(&kvm->irqfds.lock);
	eventfd_ctx_put(eventfd);

	/*
	 * Block until we know all outstanding shutdown jobs have completed
	 * so that we guarantee there will not be any more interrupts on this
	 * gsi once this deassign function returns.
	 */
	flush_workqueue(irqfd_cleanup_wq);

	return 0;
}

int
kvm_irqfd(struct kvm *kvm, struct kvm_irqfd *args)
{
	if (args->flags & ~(KVM_IRQFD_FLAG_DEASSIGN | KVM_IRQFD_FLAG_RESAMPLE))
		return -EINVAL;

	if (args->flags & KVM_IRQFD_FLAG_DEASSIGN)
		return kvm_irqfd_deassign(kvm, args);

	return kvm_irqfd_assign(kvm, args);
}
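
/*
 * For illustration only, a minimal userspace sketch of driving this ioctl
 * (hypothetical fd values and GSI; not part of this file):
 *
 *	int efd = eventfd(0, EFD_CLOEXEC);		// <sys/eventfd.h>
 *	struct kvm_irqfd irqfd = { .fd = efd, .gsi = 24 };
 *
 *	ioctl(vm_fd, KVM_IRQFD, &irqfd);		// assign
 *	write(efd, &(uint64_t){ 1 }, 8);		// inject IRQ on GSI 24
 *
 *	irqfd.flags = KVM_IRQFD_FLAG_DEASSIGN;
 *	ioctl(vm_fd, KVM_IRQFD, &irqfd);		// tear down
 */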

/*
 * This function is called as the kvm VM fd is being released.  Shut down
 * all irqfds that still remain open.
 */
void
kvm_irqfd_release(struct kvm *kvm)
{
	struct kvm_kernel_irqfd *irqfd, *tmp;

	spin_lock_irq(&kvm->irqfds.lock);

	list_for_each_entry_safe(irqfd, tmp, &kvm->irqfds.items, list)
		irqfd_deactivate(irqfd);

	spin_unlock_irq(&kvm->irqfds.lock);

	/*
	 * Block until we know all outstanding shutdown jobs have completed
	 * since we do not take a kvm* reference.
	 */
	flush_workqueue(irqfd_cleanup_wq);
}

/*
 * Take note of a change in irq routing.
 * Caller must invoke synchronize_srcu(&kvm->irq_srcu) afterwards.
 */
void kvm_irq_routing_update(struct kvm *kvm)
{
	struct kvm_kernel_irqfd *irqfd;

	spin_lock_irq(&kvm->irqfds.lock);

	list_for_each_entry(irqfd, &kvm->irqfds.items, list) {
		irqfd_update(kvm, irqfd);

#ifdef CONFIG_HAVE_KVM_IRQ_BYPASS
		if (irqfd->producer) {
			int ret = kvm_arch_update_irqfd_routing(
					irqfd->kvm, irqfd->producer->irq,
					irqfd->gsi, 1);
			WARN_ON(ret);
		}
#endif
	}

	spin_unlock_irq(&kvm->irqfds.lock);
}

/*
 * create a host-wide workqueue for issuing deferred shutdown requests
 * aggregated from all vm* instances. We need our own isolated
 * queue to ease flushing work items when a VM exits.
 */
int kvm_irqfd_init(void)
{
	irqfd_cleanup_wq = alloc_workqueue("kvm-irqfd-cleanup", 0, 0);
	if (!irqfd_cleanup_wq)
		return -ENOMEM;

	return 0;
}

void kvm_irqfd_exit(void)
{
	destroy_workqueue(irqfd_cleanup_wq);
}
#endif

/*
 * --------------------------------------------------------------------
 * ioeventfd: translate a PIO/MMIO memory write to an eventfd signal.
 *
 * userspace can register a PIO/MMIO address with an eventfd for receiving
 * notification when the memory has been touched.
 * --------------------------------------------------------------------
 */
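
/*
 * For illustration only, a minimal userspace sketch (hypothetical address
 * and values; not part of this file): register a 4-byte datamatch
 * ioeventfd on an MMIO doorbell so a guest write of 1 signals the eventfd
 * instead of exiting to userspace.
 *
 *	struct kvm_ioeventfd ioe = {
 *		.addr      = 0xfe001000,
 *		.len       = 4,
 *		.fd        = eventfd(0, EFD_CLOEXEC),
 *		.datamatch = 1,
 *		.flags     = KVM_IOEVENTFD_FLAG_DATAMATCH,
 *	};
 *	ioctl(vm_fd, KVM_IOEVENTFD, &ioe);
 */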

struct _ioeventfd {
	struct list_head     list;
	u64                  addr;
	int                  length;
	struct eventfd_ctx  *eventfd;
	u64                  datamatch;
	struct kvm_io_device dev;
	u8                   bus_idx;
	bool                 wildcard;
};

static inline struct _ioeventfd *
to_ioeventfd(struct kvm_io_device *dev)
{
	return container_of(dev, struct _ioeventfd, dev);
}

static void
ioeventfd_release(struct _ioeventfd *p)
{
	eventfd_ctx_put(p->eventfd);
	list_del(&p->list);
	kfree(p);
}

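/*
 * Match rules, in order: the address must be exact; a zero-length
 * registration matches on address alone; otherwise the length must match
 * as well, and then either the registration is a wildcard or the written
 * value must equal datamatch.  E.g. a 2-byte guest write of 0x55aa matches
 * a registration with length == 2 and datamatch == 0x55aa.
 */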
static bool
ioeventfd_in_range(struct _ioeventfd *p, gpa_t addr, int len, const void *val)
{
	u64 _val;

	if (addr != p->addr)
		/* address must be precise for a hit */
		return false;

	if (!p->length)
		/* length = 0 means only look at the address, so always a hit */
		return true;

	if (len != p->length)
		/* address-range must be precise for a hit */
		return false;

	if (p->wildcard)
		/* all else equal, wildcard is always a hit */
		return true;

	/* otherwise, we have to actually compare the data */

	BUG_ON(!IS_ALIGNED((unsigned long)val, len));

	switch (len) {
	case 1:
		_val = *(u8 *)val;
		break;
	case 2:
		_val = *(u16 *)val;
		break;
	case 4:
		_val = *(u32 *)val;
		break;
	case 8:
		_val = *(u64 *)val;
		break;
	default:
		return false;
	}

	return _val == p->datamatch;
}

/* MMIO/PIO writes trigger an event if the addr/val match */
static int
ioeventfd_write(struct kvm_vcpu *vcpu, struct kvm_io_device *this, gpa_t addr,
		int len, const void *val)
{
	struct _ioeventfd *p = to_ioeventfd(this);

	if (!ioeventfd_in_range(p, addr, len, val))
		return -EOPNOTSUPP;

	eventfd_signal(p->eventfd, 1);
	return 0;
}

/*
 * This function is called as KVM is completely shutting down.  We do not
 * need to worry about locking; just nuke anything we have as quickly as
 * possible.
 */
static void
ioeventfd_destructor(struct kvm_io_device *this)
{
	struct _ioeventfd *p = to_ioeventfd(this);

	ioeventfd_release(p);
}

static const struct kvm_io_device_ops ioeventfd_ops = {
	.write      = ioeventfd_write,
	.destructor = ioeventfd_destructor,
};

/*
 * Two registrations collide when they sit on the same bus at the same
 * address and their match sets can overlap: either one is length-less, or
 * the lengths are equal and either side is a wildcard or the datamatch
 * values are equal.  Assumes kvm->slots_lock is held.
 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 759) static bool
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 760) ioeventfd_check_collision(struct kvm *kvm, struct _ioeventfd *p)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 761) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 762) 	struct _ioeventfd *_p;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 763) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 764) 	list_for_each_entry(_p, &kvm->ioeventfds, list)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 765) 		if (_p->bus_idx == p->bus_idx &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 766) 		    _p->addr == p->addr &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 767) 		    (!_p->length || !p->length ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 768) 		     (_p->length == p->length &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 769) 		      (_p->wildcard || p->wildcard ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 770) 		       _p->datamatch == p->datamatch))))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 771) 			return true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 772) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 773) 	return false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 774) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 775) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 776) static enum kvm_bus ioeventfd_bus_from_flags(__u32 flags)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 777) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 778) 	if (flags & KVM_IOEVENTFD_FLAG_PIO)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 779) 		return KVM_PIO_BUS;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 780) 	if (flags & KVM_IOEVENTFD_FLAG_VIRTIO_CCW_NOTIFY)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 781) 		return KVM_VIRTIO_CCW_NOTIFY_BUS;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 782) 	return KVM_MMIO_BUS;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 783) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 784) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 785) static int kvm_assign_ioeventfd_idx(struct kvm *kvm,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 786) 				enum kvm_bus bus_idx,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 787) 				struct kvm_ioeventfd *args)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 788) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 789) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 790) 	struct eventfd_ctx *eventfd;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 791) 	struct _ioeventfd *p;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 792) 	int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 793) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 794) 	eventfd = eventfd_ctx_fdget(args->fd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 795) 	if (IS_ERR(eventfd))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 796) 		return PTR_ERR(eventfd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 797) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 798) 	p = kzalloc(sizeof(*p), GFP_KERNEL_ACCOUNT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 799) 	if (!p) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 800) 		ret = -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 801) 		goto fail;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 802) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 803) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 804) 	INIT_LIST_HEAD(&p->list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 805) 	p->addr    = args->addr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 806) 	p->bus_idx = bus_idx;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 807) 	p->length  = args->len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 808) 	p->eventfd = eventfd;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 809) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 810) 	/* The datamatch feature is optional, otherwise this is a wildcard */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 811) 	if (args->flags & KVM_IOEVENTFD_FLAG_DATAMATCH)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 812) 		p->datamatch = args->datamatch;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 813) 	else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 814) 		p->wildcard = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 815) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 816) 	mutex_lock(&kvm->slots_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 817) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 818) 	/* Verify that there isn't a match already */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 819) 	if (ioeventfd_check_collision(kvm, p)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 820) 		ret = -EEXIST;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 821) 		goto unlock_fail;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 822) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 823) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 824) 	kvm_iodevice_init(&p->dev, &ioeventfd_ops);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 825) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 826) 	ret = kvm_io_bus_register_dev(kvm, bus_idx, p->addr, p->length,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 827) 				      &p->dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 828) 	if (ret < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 829) 		goto unlock_fail;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 830) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 831) 	kvm_get_bus(kvm, bus_idx)->ioeventfd_count++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 832) 	list_add_tail(&p->list, &kvm->ioeventfds);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 833) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 834) 	mutex_unlock(&kvm->slots_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 835) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 836) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 837) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 838) unlock_fail:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 839) 	mutex_unlock(&kvm->slots_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 840) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 841) fail:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 842) 	kfree(p);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 843) 	eventfd_ctx_put(eventfd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 844) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 845) 	return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 846) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 847) 
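/*
 * Undo kvm_assign_ioeventfd_idx(): look up the entry matching the
 * caller's eventfd, address, length and (non-)datamatch, unregister it
 * from the bus and release it; the reference taken on the eventfd for
 * the lookup is dropped before returning.
 */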
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 848) static int
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 849) kvm_deassign_ioeventfd_idx(struct kvm *kvm, enum kvm_bus bus_idx,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 850) 			   struct kvm_ioeventfd *args)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 851) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 852) 	struct _ioeventfd        *p, *tmp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 853) 	struct eventfd_ctx       *eventfd;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 854) 	struct kvm_io_bus	 *bus;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 855) 	int                       ret = -ENOENT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 856) 	bool                      wildcard;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 857) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 858) 	eventfd = eventfd_ctx_fdget(args->fd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 859) 	if (IS_ERR(eventfd))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 860) 		return PTR_ERR(eventfd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 861) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 862) 	wildcard = !(args->flags & KVM_IOEVENTFD_FLAG_DATAMATCH);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 863) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 864) 	mutex_lock(&kvm->slots_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 865) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 866) 	list_for_each_entry_safe(p, tmp, &kvm->ioeventfds, list) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 868) 		if (p->bus_idx != bus_idx ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 869) 		    p->eventfd != eventfd  ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 870) 		    p->addr != args->addr  ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 871) 		    p->length != args->len ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 872) 		    p->wildcard != wildcard)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 873) 			continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 874) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 875) 		if (!p->wildcard && p->datamatch != args->datamatch)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 876) 			continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 877) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 878) 		kvm_io_bus_unregister_dev(kvm, bus_idx, &p->dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 879) 		bus = kvm_get_bus(kvm, bus_idx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 880) 		if (bus)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 881) 			bus->ioeventfd_count--;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 882) 		ioeventfd_release(p);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 883) 		ret = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 884) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 885) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 886) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 887) 	mutex_unlock(&kvm->slots_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 888) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 889) 	eventfd_ctx_put(eventfd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 890) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 891) 	return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 892) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 893) 
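/*
 * A length-zero MMIO ioeventfd is mirrored on KVM_FAST_MMIO_BUS at
 * assign time, so tear that mirror down as well; only the result of
 * the primary deassign is reported back.
 */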
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 894) static int kvm_deassign_ioeventfd(struct kvm *kvm, struct kvm_ioeventfd *args)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 895) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 896) 	enum kvm_bus bus_idx = ioeventfd_bus_from_flags(args->flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 897) 	int ret = kvm_deassign_ioeventfd_idx(kvm, bus_idx, args);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 898) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 899) 	if (!args->len && bus_idx == KVM_MMIO_BUS)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 900) 		kvm_deassign_ioeventfd_idx(kvm, KVM_FAST_MMIO_BUS, args);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 901) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 902) 	return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 903) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 904) 
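/*
 * Validate a userspace assign request before registering it: the
 * length must be 0, 1, 2, 4 or 8 bytes, the range must not wrap,
 * no unknown flags may be set, and DATAMATCH requires a non-zero
 * length to compare against.
 */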
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 905) static int
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 906) kvm_assign_ioeventfd(struct kvm *kvm, struct kvm_ioeventfd *args)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 907) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 908) 	enum kvm_bus              bus_idx;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 909) 	int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 910) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 911) 	bus_idx = ioeventfd_bus_from_flags(args->flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 912) 	/* must be natural-word sized, or 0 to ignore length */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 913) 	switch (args->len) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 914) 	case 0:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 915) 	case 1:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 916) 	case 2:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 917) 	case 4:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 918) 	case 8:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 919) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 920) 	default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 921) 		return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 922) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 923) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 924) 	/* check for range overflow */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 925) 	if (args->addr + args->len < args->addr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 926) 		return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 927) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 928) 	/* check for extra flags that we don't understand */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 929) 	if (args->flags & ~KVM_IOEVENTFD_VALID_FLAG_MASK)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 930) 		return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 931) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 932) 	/* ioeventfd with no length can't be combined with DATAMATCH */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 933) 	if (!args->len && (args->flags & KVM_IOEVENTFD_FLAG_DATAMATCH))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 934) 		return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 935) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 936) 	ret = kvm_assign_ioeventfd_idx(kvm, bus_idx, args);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 937) 	if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 938) 		goto fail;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 939) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 940) 	/* When length is ignored, MMIO is also put on a separate bus, for
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 941) 	 * faster lookups.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 942) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 943) 	if (!args->len && bus_idx == KVM_MMIO_BUS) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 944) 		ret = kvm_assign_ioeventfd_idx(kvm, KVM_FAST_MMIO_BUS, args);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 945) 		if (ret < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 946) 			goto fast_fail;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 947) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 948) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 949) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 950) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 951) fast_fail:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 952) 	kvm_deassign_ioeventfd_idx(kvm, bus_idx, args);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 953) fail:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 954) 	return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 955) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 956) 
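/* Entry point for the KVM_IOEVENTFD vm ioctl. */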
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 957) int
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 958) kvm_ioeventfd(struct kvm *kvm, struct kvm_ioeventfd *args)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 959) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 960) 	if (args->flags & KVM_IOEVENTFD_FLAG_DEASSIGN)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 961) 		return kvm_deassign_ioeventfd(kvm, args);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 962) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 963) 	return kvm_assign_ioeventfd(kvm, args);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 964) }
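/*
 * For reference, a minimal userspace sketch of exercising this path
 * through the KVM_IOEVENTFD vm ioctl (illustrative only; vm_fd is
 * assumed to be a VM file descriptor from KVM_CREATE_VM, and the
 * doorbell address is made up):
 *
 *	#include <err.h>
 *	#include <sys/eventfd.h>
 *	#include <sys/ioctl.h>
 *	#include <linux/kvm.h>
 *
 *	int efd = eventfd(0, EFD_CLOEXEC);
 *	struct kvm_ioeventfd args = {
 *		.addr  = 0xfe003000,	// hypothetical guest MMIO doorbell
 *		.len   = 4,		// 0/1/2/4/8; 0 also lands on FAST_MMIO
 *		.fd    = efd,
 *		.flags = 0,		// wildcard: any 4-byte write matches
 *	};
 *	if (ioctl(vm_fd, KVM_IOEVENTFD, &args) < 0)
 *		err(1, "KVM_IOEVENTFD");	// EEXIST, EINVAL, ...
 *
 * After this, a 4-byte guest write to the address signals efd instead
 * of exiting to userspace; passing KVM_IOEVENTFD_FLAG_DEASSIGN in
 * flags with the same parameters removes the registration.
 */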