Orange Pi 5 kernel

Deprecated Linux kernel 5.10.110 for OrangePi 5/5B/5+ boards

// SPDX-License-Identifier: GPL-2.0-only
/*
 * VFIO core
 *
 * Copyright (C) 2012 Red Hat, Inc.  All rights reserved.
 *     Author: Alex Williamson <alex.williamson@redhat.com>
 *
 * Derived from original vfio:
 * Copyright 2010 Cisco Systems, Inc.  All rights reserved.
 * Author: Tom Lyon, pugs@cisco.com
 */

#include <linux/cdev.h>
#include <linux/compat.h>
#include <linux/device.h>
#include <linux/file.h>
#include <linux/anon_inodes.h>
#include <linux/fs.h>
#include <linux/idr.h>
#include <linux/iommu.h>
#include <linux/list.h>
#include <linux/miscdevice.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/pci.h>
#include <linux/rwsem.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/stat.h>
#include <linux/string.h>
#include <linux/uaccess.h>
#include <linux/vfio.h>
#include <linux/wait.h>
#include <linux/sched/signal.h>

#define DRIVER_VERSION	"0.3"
#define DRIVER_AUTHOR	"Alex Williamson <alex.williamson@redhat.com>"
#define DRIVER_DESC	"VFIO - User Level meta-driver"

static struct vfio {
	struct class			*class;
	struct list_head		iommu_drivers_list;
	struct mutex			iommu_drivers_lock;
	struct list_head		group_list;
	struct idr			group_idr;
	struct mutex			group_lock;
	struct cdev			group_cdev;
	dev_t				group_devt;
	wait_queue_head_t		release_q;
} vfio;

struct vfio_iommu_driver {
	const struct vfio_iommu_driver_ops	*ops;
	struct list_head			vfio_next;
};

struct vfio_container {
	struct kref			kref;
	struct list_head		group_list;
	struct rw_semaphore		group_lock;
	struct vfio_iommu_driver	*iommu_driver;
	void				*iommu_data;
	bool				noiommu;
};

struct vfio_unbound_dev {
	struct device			*dev;
	struct list_head		unbound_next;
};

struct vfio_group {
	struct kref			kref;
	int				minor;
	atomic_t			container_users;
	struct iommu_group		*iommu_group;
	struct vfio_container		*container;
	struct list_head		device_list;
	struct mutex			device_lock;
	struct device			*dev;
	struct notifier_block		nb;
	struct list_head		vfio_next;
	struct list_head		container_next;
	struct list_head		unbound_list;
	struct mutex			unbound_lock;
	atomic_t			opened;
	wait_queue_head_t		container_q;
	bool				noiommu;
	unsigned int			dev_counter;
	struct kvm			*kvm;
	struct blocking_notifier_head	notifier;
};

struct vfio_device {
	struct kref			kref;
	struct device			*dev;
	const struct vfio_device_ops	*ops;
	struct vfio_group		*group;
	struct list_head		group_next;
	void				*device_data;
};

#ifdef CONFIG_VFIO_NOIOMMU
static bool noiommu __read_mostly;
module_param_named(enable_unsafe_noiommu_mode,
		   noiommu, bool, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(enable_unsafe_noiommu_mode, "Enable UNSAFE, no-IOMMU mode.  This mode provides no device isolation, no DMA translation, no host kernel protection, cannot be used for device assignment to virtual machines, requires RAWIO permissions, and will taint the kernel.  If you do not know what this is for, step away. (default: false)");
#endif
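
/*
 * Illustrative usage (not part of the original source): with
 * CONFIG_VFIO_NOIOMMU built in, the mode above is typically enabled at
 * module load time, or at runtime via the writable parameter file:
 *
 *	modprobe vfio enable_unsafe_noiommu_mode=1
 *	echo 1 > /sys/module/vfio/parameters/enable_unsafe_noiommu_mode
 */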

/*
 * vfio_iommu_group_{get,put} are only intended for VFIO bus driver probe
 * and remove functions; any use case other than acquiring the first
 * reference for the purpose of calling vfio_add_group_dev() or removing
 * that symmetric reference after vfio_del_group_dev() should use the raw
 * iommu_group_{get,put} functions.  In particular, vfio_iommu_group_put()
 * removes the device from the dummy group and cannot be nested.
 */
struct iommu_group *vfio_iommu_group_get(struct device *dev)
{
	struct iommu_group *group;
	int __maybe_unused ret;

	group = iommu_group_get(dev);

#ifdef CONFIG_VFIO_NOIOMMU
	/*
	 * With noiommu enabled, an IOMMU group will be created for a device
	 * that doesn't already have one and doesn't have iommu_ops on its
	 * bus.  We set iommudata simply to be able to identify these groups
	 * as special use and for reclamation later.
	 */
	if (group || !noiommu || iommu_present(dev->bus))
		return group;

	group = iommu_group_alloc();
	if (IS_ERR(group))
		return NULL;

	iommu_group_set_name(group, "vfio-noiommu");
	iommu_group_set_iommudata(group, &noiommu, NULL);
	ret = iommu_group_add_device(group, dev);
	if (ret) {
		iommu_group_put(group);
		return NULL;
	}

	/*
	 * Where to taint?  At this point we've added an IOMMU group for a
	 * device that is not backed by iommu_ops, therefore any iommu_
	 * callback using iommu_ops can legitimately Oops.  So, while we may
	 * be about to give a DMA capable device to a user without IOMMU
	 * protection, which is clearly taint-worthy, let's go ahead and do
	 * it here.
	 */
	add_taint(TAINT_USER, LOCKDEP_STILL_OK);
	dev_warn(dev, "Adding kernel taint for vfio-noiommu group on device\n");
#endif

	return group;
}
EXPORT_SYMBOL_GPL(vfio_iommu_group_get);

void vfio_iommu_group_put(struct iommu_group *group, struct device *dev)
{
#ifdef CONFIG_VFIO_NOIOMMU
	if (iommu_group_get_iommudata(group) == &noiommu)
		iommu_group_remove_device(dev);
#endif

	iommu_group_put(group);
}
EXPORT_SYMBOL_GPL(vfio_iommu_group_put);
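
/*
 * Illustrative pairing of the two helpers above in a VFIO bus driver's
 * probe path (a hedged sketch; the ops and data names are hypothetical):
 *
 *	static int my_vfio_probe(struct device *dev)
 *	{
 *		struct iommu_group *group = vfio_iommu_group_get(dev);
 *		int ret;
 *
 *		if (!group)
 *			return -EINVAL;
 *
 *		ret = vfio_add_group_dev(dev, &my_vfio_dev_ops, my_data);
 *		if (ret)
 *			vfio_iommu_group_put(group, dev);
 *		return ret;
 *	}
 *
 * The matching remove path calls vfio_del_group_dev(dev) and then drops
 * the symmetric reference with vfio_iommu_group_put().
 */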

#ifdef CONFIG_VFIO_NOIOMMU
static void *vfio_noiommu_open(unsigned long arg)
{
	if (arg != VFIO_NOIOMMU_IOMMU)
		return ERR_PTR(-EINVAL);
	if (!capable(CAP_SYS_RAWIO))
		return ERR_PTR(-EPERM);

	return NULL;
}

static void vfio_noiommu_release(void *iommu_data)
{
}

static long vfio_noiommu_ioctl(void *iommu_data,
			       unsigned int cmd, unsigned long arg)
{
	if (cmd == VFIO_CHECK_EXTENSION)
		return noiommu && (arg == VFIO_NOIOMMU_IOMMU) ? 1 : 0;

	return -ENOTTY;
}

static int vfio_noiommu_attach_group(void *iommu_data,
				     struct iommu_group *iommu_group)
{
	return iommu_group_get_iommudata(iommu_group) == &noiommu ? 0 : -EINVAL;
}

static void vfio_noiommu_detach_group(void *iommu_data,
				      struct iommu_group *iommu_group)
{
}

static const struct vfio_iommu_driver_ops vfio_noiommu_ops = {
	.name = "vfio-noiommu",
	.owner = THIS_MODULE,
	.open = vfio_noiommu_open,
	.release = vfio_noiommu_release,
	.ioctl = vfio_noiommu_ioctl,
	.attach_group = vfio_noiommu_attach_group,
	.detach_group = vfio_noiommu_detach_group,
};
#endif
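
/*
 * Illustrative userspace probe for the backend above (a sketch assuming
 * <linux/vfio.h>; error handling omitted):
 *
 *	int container = open("/dev/vfio/vfio", O_RDWR);
 *
 *	if (ioctl(container, VFIO_CHECK_EXTENSION, VFIO_NOIOMMU_IOMMU) == 1)
 *		printf("no-IOMMU backend available\n");
 */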


/*
 * IOMMU driver registration
 */
int vfio_register_iommu_driver(const struct vfio_iommu_driver_ops *ops)
{
	struct vfio_iommu_driver *driver, *tmp;

	driver = kzalloc(sizeof(*driver), GFP_KERNEL);
	if (!driver)
		return -ENOMEM;

	driver->ops = ops;

	mutex_lock(&vfio.iommu_drivers_lock);

	/* Check for duplicates */
	list_for_each_entry(tmp, &vfio.iommu_drivers_list, vfio_next) {
		if (tmp->ops == ops) {
			mutex_unlock(&vfio.iommu_drivers_lock);
			kfree(driver);
			return -EINVAL;
		}
	}

	list_add(&driver->vfio_next, &vfio.iommu_drivers_list);

	mutex_unlock(&vfio.iommu_drivers_lock);

	return 0;
}
EXPORT_SYMBOL_GPL(vfio_register_iommu_driver);

void vfio_unregister_iommu_driver(const struct vfio_iommu_driver_ops *ops)
{
	struct vfio_iommu_driver *driver;

	mutex_lock(&vfio.iommu_drivers_lock);
	list_for_each_entry(driver, &vfio.iommu_drivers_list, vfio_next) {
		if (driver->ops == ops) {
			list_del(&driver->vfio_next);
			mutex_unlock(&vfio.iommu_drivers_lock);
			kfree(driver);
			return;
		}
	}
	mutex_unlock(&vfio.iommu_drivers_lock);
}
EXPORT_SYMBOL_GPL(vfio_unregister_iommu_driver);
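
/*
 * Illustrative backend registration from an IOMMU driver module (a
 * hedged sketch; my_iommu_ops is hypothetical):
 *
 *	static int __init my_iommu_init(void)
 *	{
 *		return vfio_register_iommu_driver(&my_iommu_ops);
 *	}
 *
 *	static void __exit my_iommu_exit(void)
 *	{
 *		vfio_unregister_iommu_driver(&my_iommu_ops);
 *	}
 */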

/*
 * Group minor allocation/free - both called with vfio.group_lock held
 */
static int vfio_alloc_group_minor(struct vfio_group *group)
{
	return idr_alloc(&vfio.group_idr, group, 0, MINORMASK + 1, GFP_KERNEL);
}

static void vfio_free_group_minor(int minor)
{
	idr_remove(&vfio.group_idr, minor);
}
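
/*
 * Note: the minor returned by idr_alloc() above is paired with
 * MAJOR(vfio.group_devt) in vfio_create_group() below to create the
 * per-group character device, and idr_find() maps an opened minor back
 * to its vfio_group in vfio_group_get_from_minor().
 */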

static int vfio_iommu_group_notifier(struct notifier_block *nb,
				     unsigned long action, void *data);
static void vfio_group_get(struct vfio_group *group);

/*
 * Container objects - containers are created when /dev/vfio/vfio is
 * opened, but their lifecycle extends until the last user is done, so
 * they are freed via kref.  Must support container/group/device being
 * closed in any order.
 */
static void vfio_container_get(struct vfio_container *container)
{
	kref_get(&container->kref);
}

static void vfio_container_release(struct kref *kref)
{
	struct vfio_container *container;
	container = container_of(kref, struct vfio_container, kref);

	kfree(container);
}

static void vfio_container_put(struct vfio_container *container)
{
	kref_put(&container->kref, vfio_container_release);
}
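
/*
 * Illustrative lifetime (the creation path lives in the container fops
 * open handler, outside this excerpt): open does
 * kref_init(&container->kref), each additional user takes
 * vfio_container_get(), and every user drops its reference with
 * vfio_container_put(); the final put frees the container, which is what
 * allows container, group, and device to be closed in any order.
 */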

static void vfio_group_unlock_and_free(struct vfio_group *group)
{
	mutex_unlock(&vfio.group_lock);
	/*
	 * Unregister outside of lock.  A spurious callback is harmless now
	 * that the group is no longer in vfio.group_list.
	 */
	iommu_group_unregister_notifier(group->iommu_group, &group->nb);
	kfree(group);
}

/*
 * Group objects - create, release, get, put, search
 */
static struct vfio_group *vfio_create_group(struct iommu_group *iommu_group)
{
	struct vfio_group *group, *tmp;
	struct device *dev;
	int ret, minor;

	group = kzalloc(sizeof(*group), GFP_KERNEL);
	if (!group)
		return ERR_PTR(-ENOMEM);

	kref_init(&group->kref);
	INIT_LIST_HEAD(&group->device_list);
	mutex_init(&group->device_lock);
	INIT_LIST_HEAD(&group->unbound_list);
	mutex_init(&group->unbound_lock);
	atomic_set(&group->container_users, 0);
	atomic_set(&group->opened, 0);
	init_waitqueue_head(&group->container_q);
	group->iommu_group = iommu_group;
#ifdef CONFIG_VFIO_NOIOMMU
	group->noiommu = (iommu_group_get_iommudata(iommu_group) == &noiommu);
#endif
	BLOCKING_INIT_NOTIFIER_HEAD(&group->notifier);

	group->nb.notifier_call = vfio_iommu_group_notifier;

	/*
	 * blocking notifiers acquire a rwsem around registering and hold
	 * it around callback.  Therefore, need to register outside of
	 * vfio.group_lock to avoid A-B/B-A contention.  Our callback won't
	 * do anything unless it can find the group in vfio.group_list, so
	 * no harm in registering early.
	 */
	ret = iommu_group_register_notifier(iommu_group, &group->nb);
	if (ret) {
		kfree(group);
		return ERR_PTR(ret);
	}

	mutex_lock(&vfio.group_lock);

	/* Did we race creating this group? */
	list_for_each_entry(tmp, &vfio.group_list, vfio_next) {
		if (tmp->iommu_group == iommu_group) {
			vfio_group_get(tmp);
			vfio_group_unlock_and_free(group);
			return tmp;
		}
	}

	minor = vfio_alloc_group_minor(group);
	if (minor < 0) {
		vfio_group_unlock_and_free(group);
		return ERR_PTR(minor);
	}

	dev = device_create(vfio.class, NULL,
			    MKDEV(MAJOR(vfio.group_devt), minor),
			    group, "%s%d", group->noiommu ? "noiommu-" : "",
			    iommu_group_id(iommu_group));
	if (IS_ERR(dev)) {
		vfio_free_group_minor(minor);
		vfio_group_unlock_and_free(group);
		return ERR_CAST(dev);
	}

	group->minor = minor;
	group->dev = dev;

	list_add(&group->vfio_next, &vfio.group_list);

	mutex_unlock(&vfio.group_lock);

	return group;
}

/* called with vfio.group_lock held */
static void vfio_group_release(struct kref *kref)
{
	struct vfio_group *group = container_of(kref, struct vfio_group, kref);
	struct vfio_unbound_dev *unbound, *tmp;
	struct iommu_group *iommu_group = group->iommu_group;

	WARN_ON(!list_empty(&group->device_list));
	WARN_ON(group->notifier.head);

	list_for_each_entry_safe(unbound, tmp,
				 &group->unbound_list, unbound_next) {
		list_del(&unbound->unbound_next);
		kfree(unbound);
	}

	device_destroy(vfio.class, MKDEV(MAJOR(vfio.group_devt), group->minor));
	list_del(&group->vfio_next);
	vfio_free_group_minor(group->minor);
	vfio_group_unlock_and_free(group);
	iommu_group_put(iommu_group);
}

static void vfio_group_put(struct vfio_group *group)
{
	kref_put_mutex(&group->kref, vfio_group_release, &vfio.group_lock);
}

struct vfio_group_put_work {
	struct work_struct work;
	struct vfio_group *group;
};

static void vfio_group_put_bg(struct work_struct *work)
{
	struct vfio_group_put_work *do_work;

	do_work = container_of(work, struct vfio_group_put_work, work);

	vfio_group_put(do_work->group);
	kfree(do_work);
}

static void vfio_group_schedule_put(struct vfio_group *group)
{
	struct vfio_group_put_work *do_work;

	do_work = kmalloc(sizeof(*do_work), GFP_KERNEL);
	if (WARN_ON(!do_work))
		return;

	INIT_WORK(&do_work->work, vfio_group_put_bg);
	do_work->group = group;
	schedule_work(&do_work->work);
}
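
/*
 * Note: vfio_group_schedule_put() exists for the IOMMU group notifier
 * below.  As the comment there explains, dropping the last group
 * reference from notifier context would try to unregister the notifier
 * while its rwsem is held for read, so the final put is deferred to a
 * workqueue instead.
 */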

/* Assume group_lock or group reference is held */
static void vfio_group_get(struct vfio_group *group)
{
	kref_get(&group->kref);
}

/*
 * Not really a "try" since we may sleep on the mutex, but we need to
 * make sure the group pointer is still valid under the lock and take
 * a reference.
 */
static struct vfio_group *vfio_group_try_get(struct vfio_group *group)
{
	struct vfio_group *target = group;

	mutex_lock(&vfio.group_lock);
	list_for_each_entry(group, &vfio.group_list, vfio_next) {
		if (group == target) {
			vfio_group_get(group);
			mutex_unlock(&vfio.group_lock);
			return group;
		}
	}
	mutex_unlock(&vfio.group_lock);

	return NULL;
}

static
struct vfio_group *vfio_group_get_from_iommu(struct iommu_group *iommu_group)
{
	struct vfio_group *group;

	mutex_lock(&vfio.group_lock);
	list_for_each_entry(group, &vfio.group_list, vfio_next) {
		if (group->iommu_group == iommu_group) {
			vfio_group_get(group);
			mutex_unlock(&vfio.group_lock);
			return group;
		}
	}
	mutex_unlock(&vfio.group_lock);

	return NULL;
}

static struct vfio_group *vfio_group_get_from_minor(int minor)
{
	struct vfio_group *group;

	mutex_lock(&vfio.group_lock);
	group = idr_find(&vfio.group_idr, minor);
	if (!group) {
		mutex_unlock(&vfio.group_lock);
		return NULL;
	}
	vfio_group_get(group);
	mutex_unlock(&vfio.group_lock);

	return group;
}

static struct vfio_group *vfio_group_get_from_dev(struct device *dev)
{
	struct iommu_group *iommu_group;
	struct vfio_group *group;

	iommu_group = iommu_group_get(dev);
	if (!iommu_group)
		return NULL;

	group = vfio_group_get_from_iommu(iommu_group);
	iommu_group_put(iommu_group);

	return group;
}

/*
 * Device objects - create, release, get, put, search
 */
static
struct vfio_device *vfio_group_create_device(struct vfio_group *group,
					     struct device *dev,
					     const struct vfio_device_ops *ops,
					     void *device_data)
{
	struct vfio_device *device;

	device = kzalloc(sizeof(*device), GFP_KERNEL);
	if (!device)
		return ERR_PTR(-ENOMEM);

	kref_init(&device->kref);
	device->dev = dev;
	device->group = group;
	device->ops = ops;
	device->device_data = device_data;
	dev_set_drvdata(dev, device);

	/* No need to get group_lock, caller has group reference */
	vfio_group_get(group);

	mutex_lock(&group->device_lock);
	list_add(&device->group_next, &group->device_list);
	group->dev_counter++;
	mutex_unlock(&group->device_lock);

	return device;
}

static void vfio_device_release(struct kref *kref)
{
	struct vfio_device *device = container_of(kref,
						  struct vfio_device, kref);
	struct vfio_group *group = device->group;

	list_del(&device->group_next);
	group->dev_counter--;
	mutex_unlock(&group->device_lock);

	dev_set_drvdata(device->dev, NULL);

	kfree(device);

	/* vfio_del_group_dev may be waiting for this device */
	wake_up(&vfio.release_q);
}

/* Device reference always implies a group reference */
void vfio_device_put(struct vfio_device *device)
{
	struct vfio_group *group = device->group;
	kref_put_mutex(&device->kref, vfio_device_release, &group->device_lock);
	vfio_group_put(group);
}
EXPORT_SYMBOL_GPL(vfio_device_put);

static void vfio_device_get(struct vfio_device *device)
{
	vfio_group_get(device->group);
	kref_get(&device->kref);
}

static struct vfio_device *vfio_group_get_device(struct vfio_group *group,
						 struct device *dev)
{
	struct vfio_device *device;

	mutex_lock(&group->device_lock);
	list_for_each_entry(device, &group->device_list, group_next) {
		if (device->dev == dev) {
			vfio_device_get(device);
			mutex_unlock(&group->device_lock);
			return device;
		}
	}
	mutex_unlock(&group->device_lock);
	return NULL;
}

/*
 * Some drivers, like pci-stub, are only used to prevent other drivers from
 * claiming a device and are therefore perfectly legitimate for a user owned
 * group.  The pci-stub driver has no dependencies on DMA or the IOVA mapping
 * of the device, but it does prevent the user from having direct access to
 * the device, which is useful in some circumstances.
 *
 * We also assume that we can include PCI interconnect devices, ie. bridges.
 * IOMMU grouping on PCI necessitates that if we lack isolation on a bridge
 * then all of the downstream devices will be part of the same IOMMU group as
 * the bridge.  Thus, if placing the bridge into the user owned IOVA space
 * breaks anything, it only does so for user owned devices downstream.  Note
 * that error notification via MSI can be affected for platforms that handle
 * MSI within the same IOVA space as DMA.
 */
static const char * const vfio_driver_allowed[] = { "pci-stub" };

static bool vfio_dev_driver_allowed(struct device *dev,
				    struct device_driver *drv)
{
	if (dev_is_pci(dev)) {
		struct pci_dev *pdev = to_pci_dev(dev);

		if (pdev->hdr_type != PCI_HEADER_TYPE_NORMAL)
			return true;
	}

	return match_string(vfio_driver_allowed,
			    ARRAY_SIZE(vfio_driver_allowed),
			    drv->name) >= 0;
}
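
/*
 * Illustrative host-side setup matching the allowance above (a hedged
 * sketch; the vendor/device ID is hypothetical).  pci-stub matches no
 * IDs by default, so one is added before the device is (re)probed:
 *
 *	echo "10de 1c02" > /sys/bus/pci/drivers/pci-stub/new_id
 *
 * A device left bound to pci-stub this way does not make its group
 * non-viable.
 */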

/*
 * A vfio group is viable for use by userspace if all devices are in
 * one of the following states:
 *  - driver-less
 *  - bound to a vfio driver
 *  - bound to an otherwise allowed driver
 *  - a PCI interconnect device
 *
 * We use two methods to determine whether a device is bound to a vfio
 * driver.  The first is to test whether the device exists in the vfio
 * group.  The second is to test if the device exists on the group
 * unbound_list, indicating it's in the middle of transitioning from
 * a vfio driver to driver-less.
 */
static int vfio_dev_viable(struct device *dev, void *data)
{
	struct vfio_group *group = data;
	struct vfio_device *device;
	struct device_driver *drv = READ_ONCE(dev->driver);
	struct vfio_unbound_dev *unbound;
	int ret = -EINVAL;

	mutex_lock(&group->unbound_lock);
	list_for_each_entry(unbound, &group->unbound_list, unbound_next) {
		if (dev == unbound->dev) {
			ret = 0;
			break;
		}
	}
	mutex_unlock(&group->unbound_lock);

	if (!ret || !drv || vfio_dev_driver_allowed(dev, drv))
		return 0;

	device = vfio_group_get_device(group, dev);
	if (device) {
		vfio_device_put(device);
		return 0;
	}

	return ret;
}
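
/*
 * vfio_dev_viable() is shaped as an iommu_group_for_each_dev() callback,
 * so whole-group viability can be checked by walking every device in the
 * group, along the lines of:
 *
 *	ret = iommu_group_for_each_dev(group->iommu_group, group,
 *				       vfio_dev_viable);
 *
 * (the actual caller sits later in this file, outside this excerpt).
 */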
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  689) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  690) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  691)  * Async device support
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  692)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  693) static int vfio_group_nb_add_dev(struct vfio_group *group, struct device *dev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  694) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  695) 	struct vfio_device *device;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  696) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  697) 	/* Do we already know about it?  We shouldn't */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  698) 	device = vfio_group_get_device(group, dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  699) 	if (WARN_ON_ONCE(device)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  700) 		vfio_device_put(device);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  701) 		return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  702) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  703) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  704) 	/* Nothing to do for idle groups */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  705) 	if (!atomic_read(&group->container_users))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  706) 		return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  707) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  708) 	/* TODO Prevent device auto probing */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  709) 	dev_WARN(dev, "Device added to live group %d!\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  710) 		 iommu_group_id(group->iommu_group));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  711) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  712) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  713) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  714) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  715) static int vfio_group_nb_verify(struct vfio_group *group, struct device *dev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  716) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  717) 	/* We don't care what happens when the group isn't in use */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  718) 	if (!atomic_read(&group->container_users))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  719) 		return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  720) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  721) 	return vfio_dev_viable(dev, group);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  722) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  723) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  724) static int vfio_iommu_group_notifier(struct notifier_block *nb,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  725) 				     unsigned long action, void *data)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  726) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  727) 	struct vfio_group *group = container_of(nb, struct vfio_group, nb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  728) 	struct device *dev = data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  729) 	struct vfio_unbound_dev *unbound;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  730) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  731) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  732) 	 * Need to go through a group_lock lookup to get a reference or we
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  733) 	 * risk racing a group being removed.  Ignore spurious notifies.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  734) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  735) 	group = vfio_group_try_get(group);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  736) 	if (!group)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  737) 		return NOTIFY_OK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  738) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  739) 	switch (action) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  740) 	case IOMMU_GROUP_NOTIFY_ADD_DEVICE:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  741) 		vfio_group_nb_add_dev(group, dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  742) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  743) 	case IOMMU_GROUP_NOTIFY_DEL_DEVICE:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  744) 		/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  745) 		 * Nothing to do here.  If the device is in use, then the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  746) 		 * vfio sub-driver should block the remove callback until
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  747) 		 * it is unused.  If the device is unused or attached to a
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  748) 		 * stub driver, then it should be released and we don't
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  749) 		 * care that it will be going away.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  750) 		 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  751) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  752) 	case IOMMU_GROUP_NOTIFY_BIND_DRIVER:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  753) 		dev_dbg(dev, "%s: group %d binding to driver\n", __func__,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  754) 			iommu_group_id(group->iommu_group));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  755) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  756) 	case IOMMU_GROUP_NOTIFY_BOUND_DRIVER:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  757) 		dev_dbg(dev, "%s: group %d bound to driver %s\n", __func__,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  758) 			iommu_group_id(group->iommu_group), dev->driver->name);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  759) 		BUG_ON(vfio_group_nb_verify(group, dev));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  760) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  761) 	case IOMMU_GROUP_NOTIFY_UNBIND_DRIVER:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  762) 		dev_dbg(dev, "%s: group %d unbinding from driver %s\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  763) 			__func__, iommu_group_id(group->iommu_group),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  764) 			dev->driver->name);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  765) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  766) 	case IOMMU_GROUP_NOTIFY_UNBOUND_DRIVER:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  767) 		dev_dbg(dev, "%s: group %d unbound from driver\n", __func__,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  768) 			iommu_group_id(group->iommu_group));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  769) 		/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  770) 		 * XXX An unbound device in a live group is ok, but we'd
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  771) 		 * really like to avoid the above BUG_ON by preventing other
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  772) 		 * drivers from binding to it.  Once that occurs, we have to
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  773) 		 * stop the system to maintain isolation.  At a minimum, we'd
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  774) 		 * want a toggle to disable driver auto probe for this device.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  775) 		 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  776) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  777) 		mutex_lock(&group->unbound_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  778) 		list_for_each_entry(unbound,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  779) 				    &group->unbound_list, unbound_next) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  780) 			if (dev == unbound->dev) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  781) 				list_del(&unbound->unbound_next);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  782) 				kfree(unbound);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  783) 				break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  784) 			}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  785) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  786) 		mutex_unlock(&group->unbound_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  787) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  788) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  789) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  790) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  791) 	 * If we're the last reference to the group, the group will be
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  792) 	 * released, which includes unregistering the iommu group notifier.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  793) 	 * We hold a read-lock on that notifier list, unregistering needs
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  794) 	 * a write-lock... deadlock.  Release our reference asynchronously
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  795) 	 * to avoid that situation.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  796) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  797) 	vfio_group_schedule_put(group);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  798) 	return NOTIFY_OK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  799) }
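
/*
 * For reference, a minimal sketch of the deferred-put pattern the comment
 * above relies on (hypothetical names; the real helper behind
 * vfio_group_schedule_put() is defined earlier in this file):
 *
 *	struct deferred_put {
 *		struct work_struct work;
 *		struct vfio_group *group;
 *	};
 *
 *	static void deferred_put_fn(struct work_struct *work)
 *	{
 *		struct deferred_put *dp =
 *			container_of(work, struct deferred_put, work);
 *
 *		vfio_group_put(dp->group);	// runs from the workqueue,
 *		kfree(dp);			// outside the notifier chain
 *	}
 */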
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  800) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  801) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  802)  * VFIO driver API
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  803)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  804) int vfio_add_group_dev(struct device *dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  805) 		       const struct vfio_device_ops *ops, void *device_data)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  806) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  807) 	struct iommu_group *iommu_group;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  808) 	struct vfio_group *group;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  809) 	struct vfio_device *device;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  810) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  811) 	iommu_group = iommu_group_get(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  812) 	if (!iommu_group)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  813) 		return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  814) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  815) 	group = vfio_group_get_from_iommu(iommu_group);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  816) 	if (!group) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  817) 		group = vfio_create_group(iommu_group);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  818) 		if (IS_ERR(group)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  819) 			iommu_group_put(iommu_group);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  820) 			return PTR_ERR(group);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  821) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  822) 	} else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  823) 		/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  824) 		 * A found vfio_group already holds a reference to the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  825) 		 * iommu_group.  A created vfio_group keeps the reference.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  826) 		 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  827) 		iommu_group_put(iommu_group);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  828) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  829) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  830) 	device = vfio_group_get_device(group, dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  831) 	if (device) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  832) 		dev_WARN(dev, "Device already exists on group %d\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  833) 			 iommu_group_id(iommu_group));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  834) 		vfio_device_put(device);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  835) 		vfio_group_put(group);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  836) 		return -EBUSY;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  837) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  838) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  839) 	device = vfio_group_create_device(group, dev, ops, device_data);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  840) 	if (IS_ERR(device)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  841) 		vfio_group_put(group);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  842) 		return PTR_ERR(device);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  843) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  844) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  845) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  846) 	 * Drop all but the vfio_device reference.  The vfio_device holds
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  847) 	 * a reference to the vfio_group, which holds a reference to the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  848) 	 * iommu_group.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  849) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  850) 	vfio_group_put(group);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  851) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  852) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  853) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  854) EXPORT_SYMBOL_GPL(vfio_add_group_dev);
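
/*
 * A vfio bus driver calls vfio_add_group_dev() from its probe path to
 * expose a device through vfio.  A minimal sketch, assuming hypothetical
 * my_* types and callbacks (vfio-pci is the in-tree reference):
 *
 *	static const struct vfio_device_ops my_vfio_ops = {
 *		.name		= "vfio-sample",
 *		.open		= my_open,
 *		.release	= my_release,
 *		.ioctl		= my_ioctl,
 *		.request	= my_request,
 *	};
 *
 *	static int my_probe(struct device *dev)
 *	{
 *		struct my_device *mdev = my_alloc(dev);
 *
 *		if (!mdev)
 *			return -ENOMEM;
 *		// Creates or reuses the vfio_group for dev's iommu_group.
 *		return vfio_add_group_dev(dev, &my_vfio_ops, mdev);
 *	}
 */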
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  855) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  856) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  857)  * Get a reference to the vfio_device for a device.  Even if the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  858)  * caller thinks they own the device, they could be racing with a
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  859)  * release call path, so we can't trust drvdata for the shortcut.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  860)  * Go the long way around, from the iommu_group to the vfio_group
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  861)  * to the vfio_device.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  862)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  863) struct vfio_device *vfio_device_get_from_dev(struct device *dev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  864) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  865) 	struct vfio_group *group;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  866) 	struct vfio_device *device;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  867) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  868) 	group = vfio_group_get_from_dev(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  869) 	if (!group)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  870) 		return NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  871) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  872) 	device = vfio_group_get_device(group, dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  873) 	vfio_group_put(group);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  874) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  875) 	return device;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  876) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  877) EXPORT_SYMBOL_GPL(vfio_device_get_from_dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  878) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  879) static struct vfio_device *vfio_device_get_from_name(struct vfio_group *group,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  880) 						     char *buf)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  881) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  882) 	struct vfio_device *it, *device = ERR_PTR(-ENODEV);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  883) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  884) 	mutex_lock(&group->device_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  885) 	list_for_each_entry(it, &group->device_list, group_next) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  886) 		int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  887) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  888) 		if (it->ops->match) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  889) 			ret = it->ops->match(it->device_data, buf);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  890) 			if (ret < 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  891) 				device = ERR_PTR(ret);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  892) 				break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  893) 			}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  894) 		} else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  895) 			ret = !strcmp(dev_name(it->dev), buf);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  896) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  897) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  898) 		if (ret) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  899) 			device = it;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  900) 			vfio_device_get(device);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  901) 			break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  902) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  903) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  904) 	mutex_unlock(&group->device_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  905) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  906) 	return device;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  907) }
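
/*
 * The ops->match hook above lets a bus driver accept more than the exact
 * dev_name() string.  A sketch of a trivial implementation (my_device is
 * a hypothetical type); returning a negative value aborts the lookup:
 *
 *	static int my_match(void *device_data, char *buf)
 *	{
 *		struct my_device *mdev = device_data;
 *
 *		// Nonzero means "buf names this device".
 *		return !strcmp(dev_name(mdev->dev), buf);
 *	}
 */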
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  908) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  909) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  910)  * Caller must hold a reference to the vfio_device
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  911)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  912) void *vfio_device_data(struct vfio_device *device)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  913) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  914) 	return device->device_data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  915) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  916) EXPORT_SYMBOL_GPL(vfio_device_data);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  917) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  918) /* Decrement the device reference count and wait for the device to be
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  919)  * removed.  Open file descriptors for the device...
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  920)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  921) void *vfio_del_group_dev(struct device *dev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  922) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  923) 	DEFINE_WAIT_FUNC(wait, woken_wake_function);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  924) 	struct vfio_device *device = dev_get_drvdata(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  925) 	struct vfio_group *group = device->group;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  926) 	void *device_data = device->device_data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  927) 	struct vfio_unbound_dev *unbound;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  928) 	unsigned int i = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  929) 	bool interrupted = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  930) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  931) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  932) 	 * The group exists so long as we have a device reference.  Get
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  933) 	 * a group reference and use it to scan for the device going away.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  934) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  935) 	vfio_group_get(group);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  936) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  937) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  938) 	 * When the device is removed from the group, the group suddenly
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  939) 	 * becomes non-viable; the device has a driver (until the unbind
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  940) 	 * completes), but it's not present in the group.  This is bad news
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  941) 	 * for any external users that need to re-acquire a group reference
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  942) 	 * in order to match and release their existing reference.  To
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  943) 	 * solve this, we track such devices on the unbound_list to bridge
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  944) 	 * the gap until they're fully unbound.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  945) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  946) 	unbound = kzalloc(sizeof(*unbound), GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  947) 	if (unbound) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  948) 		unbound->dev = dev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  949) 		mutex_lock(&group->unbound_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  950) 		list_add(&unbound->unbound_next, &group->unbound_list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  951) 		mutex_unlock(&group->unbound_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  952) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  953) 	WARN_ON(!unbound);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  954) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  955) 	vfio_device_put(device);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  956) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  957) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  958) 	 * If the device is still present in the group after the above
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  959) 	 * 'put', then it is in use and we need to request it from the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  960) 	 * bus driver.  The driver may in turn need to request the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  961) 	 * device from the user.  We send the request on an arbitrary
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  962) 	 * interval with counter to allow the driver to take escalating
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  963) 	 * measures to release the device if it has the ability to do so.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  964) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  965) 	add_wait_queue(&vfio.release_q, &wait);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  966) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  967) 	do {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  968) 		device = vfio_group_get_device(group, dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  969) 		if (!device)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  970) 			break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  971) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  972) 		if (device->ops->request)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  973) 			device->ops->request(device_data, i++);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  974) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  975) 		vfio_device_put(device);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  976) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  977) 		if (interrupted) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  978) 			wait_woken(&wait, TASK_UNINTERRUPTIBLE, HZ * 10);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  979) 		} else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  980) 			wait_woken(&wait, TASK_INTERRUPTIBLE, HZ * 10);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  981) 			if (signal_pending(current)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  982) 				interrupted = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  983) 			dev_warn(dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  984) 				 "Device is currently in use, task \"%s\" (%d) blocked until device is released",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  985) 				 current->comm, task_pid_nr(current));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  988) 			}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  989) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  990) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  991) 	} while (1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  992) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  993) 	remove_wait_queue(&vfio.release_q, &wait);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  994) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  995) 	 * In order to support multiple devices per group, devices can be
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  996) 	 * plucked from the group while other devices in the group are still
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  997) 	 * in use.  The container persists with this group and those remaining
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  998) 	 * devices still attached.  If the user creates an isolation violation
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  999) 	 * by binding this device to another driver while the group is still in
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1000) 	 * use, that's their fault.  However, in the case of removing the last,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1001) 	 * or potentially the only, device in the group there can be no other
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1002) 	 * in-use devices in the group.  The user has done their due diligence
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1003) 	 * and we should lay no claims to those devices.  In order to do that,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1004) 	 * we need to make sure the group is detached from the container.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1005) 	 * Without this stall, we're potentially racing with a user process
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1006) 	 * that may attempt to immediately bind this device to another driver.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1007) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1008) 	if (list_empty(&group->device_list))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1009) 		wait_event(group->container_q, !group->container);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1010) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1011) 	vfio_group_put(group);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1012) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1013) 	return device_data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1014) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1015) EXPORT_SYMBOL_GPL(vfio_del_group_dev);
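
/*
 * The ops->request callback polled above is the sub-driver's chance to
 * nudge the user.  A sketch of one plausible implementation, assuming a
 * hypothetical eventfd the user registered for device-request signals:
 *
 *	static void my_request(void *device_data, unsigned int count)
 *	{
 *		struct my_device *mdev = device_data;
 *
 *		if (mdev->req_trigger) {
 *			// Ask politely on each 10s pass through the loop.
 *			eventfd_signal(mdev->req_trigger, 1);
 *		} else if (count == 0) {
 *			// No channel to the user; at least log why
 *			// vfio_del_group_dev() is blocked.
 *			dev_notice(mdev->dev,
 *				   "no eventfd registered for device request\n");
 *		}
 *	}
 */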
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1016) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1017) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1018)  * VFIO base fd, /dev/vfio/vfio
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1019)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1020) static long vfio_ioctl_check_extension(struct vfio_container *container,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1021) 				       unsigned long arg)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1022) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1023) 	struct vfio_iommu_driver *driver;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1024) 	long ret = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1025) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1026) 	down_read(&container->group_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1027) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1028) 	driver = container->iommu_driver;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1029) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1030) 	switch (arg) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1031) 		/* No base extensions yet */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1032) 	default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1033) 		/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1034) 		 * If no driver is set, poll all registered drivers for
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1035) 		 * extensions and return the first positive result.  If
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1036) 		 * a driver is already set, further queries will be passed
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1037) 		 * only to that driver.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1038) 		 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1039) 		if (!driver) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1040) 			mutex_lock(&vfio.iommu_drivers_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1041) 			list_for_each_entry(driver, &vfio.iommu_drivers_list,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1042) 					    vfio_next) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1043) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1044) #ifdef CONFIG_VFIO_NOIOMMU
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1045) 				if (!list_empty(&container->group_list) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1046) 				    (container->noiommu !=
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1047) 				     (driver->ops == &vfio_noiommu_ops)))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1048) 					continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1049) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1050) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1051) 				if (!try_module_get(driver->ops->owner))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1052) 					continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1053) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1054) 				ret = driver->ops->ioctl(NULL,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1055) 							 VFIO_CHECK_EXTENSION,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1056) 							 arg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1057) 				module_put(driver->ops->owner);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1058) 				if (ret > 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1059) 					break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1060) 			}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1061) 			mutex_unlock(&vfio.iommu_drivers_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1062) 		} else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1063) 			ret = driver->ops->ioctl(container->iommu_data,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1064) 						 VFIO_CHECK_EXTENSION, arg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1065) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1066) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1067) 	up_read(&container->group_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1068) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1069) 	return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1070) }
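
/*
 * From userspace the extension check is the first step of container
 * setup.  A sketch (error handling elided; see
 * Documentation/driver-api/vfio.rst for the full flow):
 *
 *	int container = open("/dev/vfio/vfio", O_RDWR);
 *
 *	if (ioctl(container, VFIO_GET_API_VERSION) != VFIO_API_VERSION)
 *		return -1;	// unknown API version
 *	if (!ioctl(container, VFIO_CHECK_EXTENSION, VFIO_TYPE1_IOMMU))
 *		return -1;	// no type1 iommu backend registered
 */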
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1071) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1072) /* hold write lock on container->group_lock */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1073) static int __vfio_container_attach_groups(struct vfio_container *container,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1074) 					  struct vfio_iommu_driver *driver,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1075) 					  void *data)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1076) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1077) 	struct vfio_group *group;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1078) 	int ret = -ENODEV;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1079) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1080) 	list_for_each_entry(group, &container->group_list, container_next) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1081) 		ret = driver->ops->attach_group(data, group->iommu_group);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1082) 		if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1083) 			goto unwind;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1084) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1085) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1086) 	return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1087) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1088) unwind:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1089) 	list_for_each_entry_continue_reverse(group, &container->group_list,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1090) 					     container_next) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1091) 		driver->ops->detach_group(data, group->iommu_group);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1092) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1093) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1094) 	return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1095) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1096) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1097) static long vfio_ioctl_set_iommu(struct vfio_container *container,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1098) 				 unsigned long arg)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1099) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1100) 	struct vfio_iommu_driver *driver;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1101) 	long ret = -ENODEV;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1102) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1103) 	down_write(&container->group_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1104) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1105) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1106) 	 * The container is designed to be an unprivileged interface while
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1107) 	 * the group can be assigned to specific users.  Therefore, only by
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1108) 	 * adding a group to a container does the user get the privilege of
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1109) 	 * enabling the iommu, which may allocate finite resources.  There
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1110) 	 * is no unset_iommu, but by removing all the groups from a container,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1111) 	 * the container is deprivileged and returns to an unset state.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1112) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1113) 	if (list_empty(&container->group_list) || container->iommu_driver) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1114) 		up_write(&container->group_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1115) 		return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1116) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1117) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1118) 	mutex_lock(&vfio.iommu_drivers_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1119) 	list_for_each_entry(driver, &vfio.iommu_drivers_list, vfio_next) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1120) 		void *data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1121) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1122) #ifdef CONFIG_VFIO_NOIOMMU
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1123) 		/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1124) 		 * Only noiommu containers can use vfio-noiommu and noiommu
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1125) 		 * containers can only use vfio-noiommu.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1126) 		 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1127) 		if (container->noiommu != (driver->ops == &vfio_noiommu_ops))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1128) 			continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1129) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1130) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1131) 		if (!try_module_get(driver->ops->owner))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1132) 			continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1133) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1134) 		/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1135) 		 * The arg magic for SET_IOMMU is the same as CHECK_EXTENSION,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1136) 		 * so test which iommu drivers report support for this
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1137) 		 * extension and call open on those that do.  We also pass
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1138) 		 * the magic through, allowing a single driver to support
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1139) 		 * multiple interfaces if it wants to.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1140) 		 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1141) 		if (driver->ops->ioctl(NULL, VFIO_CHECK_EXTENSION, arg) <= 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1142) 			module_put(driver->ops->owner);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1143) 			continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1144) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1145) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1146) 		data = driver->ops->open(arg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1147) 		if (IS_ERR(data)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1148) 			ret = PTR_ERR(data);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1149) 			module_put(driver->ops->owner);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1150) 			continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1151) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1152) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1153) 		ret = __vfio_container_attach_groups(container, driver, data);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1154) 		if (ret) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1155) 			driver->ops->release(data);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1156) 			module_put(driver->ops->owner);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1157) 			continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1158) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1159) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1160) 		container->iommu_driver = driver;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1161) 		container->iommu_data = data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1162) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1163) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1164) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1165) 	mutex_unlock(&vfio.iommu_drivers_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1166) 	up_write(&container->group_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1167) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1168) 	return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1169) }
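
/*
 * Note the ordering this enforces: at least one group must already be
 * attached before VFIO_SET_IOMMU can succeed.  From userspace, roughly:
 *
 *	// An empty container can't set an iommu (-EINVAL above).
 *	ioctl(group, VFIO_GROUP_SET_CONTAINER, &container);
 *	// Now the container can be given an iommu backend.
 *	ioctl(container, VFIO_SET_IOMMU, VFIO_TYPE1_IOMMU);
 */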
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1170) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1171) static long vfio_fops_unl_ioctl(struct file *filep,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1172) 				unsigned int cmd, unsigned long arg)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1173) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1174) 	struct vfio_container *container = filep->private_data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1175) 	struct vfio_iommu_driver *driver;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1176) 	void *data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1177) 	long ret = -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1178) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1179) 	if (!container)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1180) 		return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1181) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1182) 	switch (cmd) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1183) 	case VFIO_GET_API_VERSION:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1184) 		ret = VFIO_API_VERSION;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1185) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1186) 	case VFIO_CHECK_EXTENSION:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1187) 		ret = vfio_ioctl_check_extension(container, arg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1188) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1189) 	case VFIO_SET_IOMMU:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1190) 		ret = vfio_ioctl_set_iommu(container, arg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1191) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1192) 	default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1193) 		driver = container->iommu_driver;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1194) 		data = container->iommu_data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1195) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1196) 		if (driver) /* passthrough all unrecognized ioctls */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1197) 			ret = driver->ops->ioctl(data, cmd, arg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1198) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1199) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1200) 	return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1201) }
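
/*
 * The passthrough default above is how the iommu backend's own ioctls,
 * e.g. the type1 DMA mapping calls, reach the driver.  A userspace
 * sketch (buf and len are placeholders):
 *
 *	struct vfio_iommu_type1_dma_map map = {
 *		.argsz = sizeof(map),
 *		.flags = VFIO_DMA_MAP_FLAG_READ | VFIO_DMA_MAP_FLAG_WRITE,
 *		.vaddr = (__u64)(uintptr_t)buf,
 *		.iova  = 0,		// device-visible address
 *		.size  = len,
 *	};
 *
 *	ioctl(container, VFIO_IOMMU_MAP_DMA, &map);
 */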
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1202) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1203) static int vfio_fops_open(struct inode *inode, struct file *filep)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1204) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1205) 	struct vfio_container *container;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1206) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1207) 	container = kzalloc(sizeof(*container), GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1208) 	if (!container)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1209) 		return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1210) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1211) 	INIT_LIST_HEAD(&container->group_list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1212) 	init_rwsem(&container->group_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1213) 	kref_init(&container->kref);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1214) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1215) 	filep->private_data = container;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1216) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1217) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1218) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1219) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1220) static int vfio_fops_release(struct inode *inode, struct file *filep)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1221) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1222) 	struct vfio_container *container = filep->private_data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1223) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1224) 	filep->private_data = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1225) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1226) 	vfio_container_put(container);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1227) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1228) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1229) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1230) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1231) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1232)  * Once an iommu driver is set, we optionally pass read/write/mmap
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1233)  * on to the driver, allowing management interfaces beyond ioctl.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1234)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1235) static ssize_t vfio_fops_read(struct file *filep, char __user *buf,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1236) 			      size_t count, loff_t *ppos)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1237) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1238) 	struct vfio_container *container = filep->private_data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1239) 	struct vfio_iommu_driver *driver;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1240) 	ssize_t ret = -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1241) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1242) 	driver = container->iommu_driver;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1243) 	if (likely(driver && driver->ops->read))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1244) 		ret = driver->ops->read(container->iommu_data,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1245) 					buf, count, ppos);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1246) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1247) 	return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1248) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1249) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1250) static ssize_t vfio_fops_write(struct file *filep, const char __user *buf,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1251) 			       size_t count, loff_t *ppos)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1252) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1253) 	struct vfio_container *container = filep->private_data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1254) 	struct vfio_iommu_driver *driver;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1255) 	ssize_t ret = -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1256) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1257) 	driver = container->iommu_driver;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1258) 	if (likely(driver && driver->ops->write))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1259) 		ret = driver->ops->write(container->iommu_data,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1260) 					 buf, count, ppos);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1261) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1262) 	return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1263) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1264) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1265) static int vfio_fops_mmap(struct file *filep, struct vm_area_struct *vma)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1266) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1267) 	struct vfio_container *container = filep->private_data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1268) 	struct vfio_iommu_driver *driver;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1269) 	int ret = -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1270) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1271) 	driver = container->iommu_driver;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1272) 	if (likely(driver && driver->ops->mmap))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1273) 		ret = driver->ops->mmap(container->iommu_data, vma);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1274) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1275) 	return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1276) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1277) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1278) static const struct file_operations vfio_fops = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1279) 	.owner		= THIS_MODULE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1280) 	.open		= vfio_fops_open,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1281) 	.release	= vfio_fops_release,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1282) 	.read		= vfio_fops_read,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1283) 	.write		= vfio_fops_write,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1284) 	.unlocked_ioctl	= vfio_fops_unl_ioctl,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1285) 	.compat_ioctl	= compat_ptr_ioctl,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1286) 	.mmap		= vfio_fops_mmap,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1287) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1288) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1289) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1290)  * VFIO Group fd, /dev/vfio/$GROUP
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1291)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1292) static void __vfio_group_unset_container(struct vfio_group *group)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1293) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1294) 	struct vfio_container *container = group->container;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1295) 	struct vfio_iommu_driver *driver;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1296) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1297) 	down_write(&container->group_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1298) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1299) 	driver = container->iommu_driver;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1300) 	if (driver)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1301) 		driver->ops->detach_group(container->iommu_data,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1302) 					  group->iommu_group);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1303) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1304) 	group->container = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1305) 	wake_up(&group->container_q);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1306) 	list_del(&group->container_next);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1307) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1308) 	/* Detaching the last group deprivileges a container, remove iommu */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1309) 	if (driver && list_empty(&container->group_list)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1310) 		driver->ops->release(container->iommu_data);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1311) 		module_put(driver->ops->owner);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1312) 		container->iommu_driver = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1313) 		container->iommu_data = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1314) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1315) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1316) 	up_write(&container->group_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1317) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1318) 	vfio_container_put(container);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1319) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1320) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1321) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1322)  * VFIO_GROUP_UNSET_CONTAINER should fail if there are other users or
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1323)  * if there was no container to unset.  Since the ioctl is called on
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1324)  * the group, we know the group still exists; therefore the only
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1325)  * valid transition here is 1->0.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1326)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1327) static int vfio_group_unset_container(struct vfio_group *group)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1328) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1329) 	int users = atomic_cmpxchg(&group->container_users, 1, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1330) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1331) 	if (!users)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1332) 		return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1333) 	if (users != 1)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1334) 		return -EBUSY;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1335) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1336) 	__vfio_group_unset_container(group);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1337) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1338) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1339) }
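
/*
 * From userspace this is simply:
 *
 *	// Fails with -EBUSY while any device fd from the group is open,
 *	// and with -EINVAL if no container was set.
 *	ioctl(group, VFIO_GROUP_UNSET_CONTAINER);
 */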
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1340) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1341) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1342)  * When removing container users, anything that removes the last user
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1343)  * implicitly removes the group from the container.  That is, if the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1344)  * group file descriptor is closed, as well as any device file descriptors,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1345)  * the group is free.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1346)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1347) static void vfio_group_try_dissolve_container(struct vfio_group *group)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1348) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1349) 	if (atomic_dec_if_positive(&group->container_users) == 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1350) 		__vfio_group_unset_container(group);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1351) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1352) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1353) static int vfio_group_set_container(struct vfio_group *group, int container_fd)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1354) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1355) 	struct fd f;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1356) 	struct vfio_container *container;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1357) 	struct vfio_iommu_driver *driver;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1358) 	int ret = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1359) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1360) 	if (atomic_read(&group->container_users))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1361) 		return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1362) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1363) 	if (group->noiommu && !capable(CAP_SYS_RAWIO))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1364) 		return -EPERM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1365) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1366) 	f = fdget(container_fd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1367) 	if (!f.file)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1368) 		return -EBADF;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1369) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1370) 	/* Sanity check, is this really our fd? */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1371) 	if (f.file->f_op != &vfio_fops) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1372) 		fdput(f);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1373) 		return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1374) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1375) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1376) 	container = f.file->private_data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1377) 	WARN_ON(!container); /* fget ensures we don't race vfio_release */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1378) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1379) 	down_write(&container->group_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1380) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1381) 	/* Real groups and fake groups cannot mix */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1382) 	if (!list_empty(&container->group_list) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1383) 	    container->noiommu != group->noiommu) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1384) 		ret = -EPERM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1385) 		goto unlock_out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1386) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1387) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1388) 	driver = container->iommu_driver;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1389) 	if (driver) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1390) 		ret = driver->ops->attach_group(container->iommu_data,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1391) 						group->iommu_group);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1392) 		if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1393) 			goto unlock_out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1394) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1395) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1396) 	group->container = container;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1397) 	container->noiommu = group->noiommu;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1398) 	list_add(&group->container_next, &container->group_list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1399) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1400) 	/* Get a reference on the container and mark a user within the group */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1401) 	vfio_container_get(container);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1402) 	atomic_inc(&group->container_users);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1403) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1404) unlock_out:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1405) 	up_write(&container->group_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1406) 	fdput(f);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1407) 	return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1408) }
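
/*
 * Userspace reaches this through VFIO_GROUP_SET_CONTAINER.  A sketch of
 * the usual sequence ("26" is a placeholder group number):
 *
 *	int group = open("/dev/vfio/26", O_RDWR);
 *	struct vfio_group_status status = { .argsz = sizeof(status) };
 *
 *	ioctl(group, VFIO_GROUP_GET_STATUS, &status);
 *	if (!(status.flags & VFIO_GROUP_FLAGS_VIABLE))
 *		return -1;	// a device in the group isn't bound to vfio
 *	ioctl(group, VFIO_GROUP_SET_CONTAINER, &container);
 */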
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1409) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1410) static bool vfio_group_viable(struct vfio_group *group)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1411) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1412) 	return (iommu_group_for_each_dev(group->iommu_group,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1413) 					 group, vfio_dev_viable) == 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1414) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1415) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1416) static int vfio_group_add_container_user(struct vfio_group *group)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1417) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1418) 	if (!atomic_inc_not_zero(&group->container_users))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1419) 		return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1420) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1421) 	if (group->noiommu) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1422) 		atomic_dec(&group->container_users);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1423) 		return -EPERM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1424) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1425) 	if (!group->container->iommu_driver || !vfio_group_viable(group)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1426) 		atomic_dec(&group->container_users);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1427) 		return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1428) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1429) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1430) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1431) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1432) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1433) static const struct file_operations vfio_device_fops;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1434) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1435) static int vfio_group_get_device_fd(struct vfio_group *group, char *buf)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1436) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1437) 	struct vfio_device *device;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1438) 	struct file *filep;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1439) 	int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1440) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1441) 	if (atomic_read(&group->container_users) == 0 ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1442) 	    !group->container->iommu_driver || !vfio_group_viable(group))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1443) 		return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1444) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1445) 	if (group->noiommu && !capable(CAP_SYS_RAWIO))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1446) 		return -EPERM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1447) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1448) 	device = vfio_device_get_from_name(group, buf);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1449) 	if (IS_ERR(device))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1450) 		return PTR_ERR(device);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1451) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1452) 	ret = device->ops->open(device->device_data);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1453) 	if (ret) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1454) 		vfio_device_put(device);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1455) 		return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1456) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1457) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1458) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1459) 	 * We can't use anon_inode_getfd() because we need to modify
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1460) 	 * the f_mode flags directly to allow more than just ioctls
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1461) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1462) 	ret = get_unused_fd_flags(O_CLOEXEC);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1463) 	if (ret < 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1464) 		device->ops->release(device->device_data);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1465) 		vfio_device_put(device);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1466) 		return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1467) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1468) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1469) 	filep = anon_inode_getfile("[vfio-device]", &vfio_device_fops,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1470) 				   device, O_RDWR);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1471) 	if (IS_ERR(filep)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1472) 		put_unused_fd(ret);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1473) 		ret = PTR_ERR(filep);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1474) 		device->ops->release(device->device_data);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1475) 		vfio_device_put(device);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1476) 		return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1477) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1478) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1479) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1480) 	 * TODO: add an anon_inode interface to do this.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1481) 	 * Appears to be missing by lack of need rather than
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1482) 	 * explicitly prevented.  Now there's need.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1483) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1484) 	filep->f_mode |= (FMODE_LSEEK | FMODE_PREAD | FMODE_PWRITE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1485) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1486) 	atomic_inc(&group->container_users);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1487) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1488) 	fd_install(ret, filep);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1489) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1490) 	if (group->noiommu)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1491) 		dev_warn(device->dev, "vfio-noiommu device opened by user (%s:%d)\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1492) 			 current->comm, task_pid_nr(current));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1493) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1494) 	return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1495) }
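
/*
 * The matching userspace call names the device within the group; for a
 * PCI device that's its dev_name() form (the address is a placeholder):
 *
 *	int device = ioctl(group, VFIO_GROUP_GET_DEVICE_FD, "0000:06:0d.0");
 *
 * The returned fd accepts the vfio device ioctls as well as read/write
 * and mmap, thanks to the f_mode fixup above.
 */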
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1496) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1497) static long vfio_group_fops_unl_ioctl(struct file *filep,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1498) 				      unsigned int cmd, unsigned long arg)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1499) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1500) 	struct vfio_group *group = filep->private_data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1501) 	long ret = -ENOTTY;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1502) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1503) 	switch (cmd) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1504) 	case VFIO_GROUP_GET_STATUS:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1505) 	{
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1506) 		struct vfio_group_status status;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1507) 		unsigned long minsz;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1508) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1509) 		minsz = offsetofend(struct vfio_group_status, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1510) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1511) 		if (copy_from_user(&status, (void __user *)arg, minsz))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1512) 			return -EFAULT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1513) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1514) 		if (status.argsz < minsz)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1515) 			return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1516) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1517) 		status.flags = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1518) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1519) 		if (vfio_group_viable(group))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1520) 			status.flags |= VFIO_GROUP_FLAGS_VIABLE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1521) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1522) 		if (group->container)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1523) 			status.flags |= VFIO_GROUP_FLAGS_CONTAINER_SET;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1524) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1525) 		if (copy_to_user((void __user *)arg, &status, minsz))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1526) 			return -EFAULT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1527) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1528) 		ret = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1529) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1530) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1531) 	case VFIO_GROUP_SET_CONTAINER:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1532) 	{
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1533) 		int fd;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1534) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1535) 		if (get_user(fd, (int __user *)arg))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1536) 			return -EFAULT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1537) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1538) 		if (fd < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1539) 			return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1540) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1541) 		ret = vfio_group_set_container(group, fd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1542) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1543) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1544) 	case VFIO_GROUP_UNSET_CONTAINER:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1545) 		ret = vfio_group_unset_container(group);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1546) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1547) 	case VFIO_GROUP_GET_DEVICE_FD:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1548) 	{
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1549) 		char *buf;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1550) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1551) 		buf = strndup_user((const char __user *)arg, PAGE_SIZE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1552) 		if (IS_ERR(buf))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1553) 			return PTR_ERR(buf);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1554) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1555) 		ret = vfio_group_get_device_fd(group, buf);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1556) 		kfree(buf);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1557) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1558) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1559) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1560) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1561) 	return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1562) }
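
/*
 * Illustrative user-space sketch (not part of this file) of the group
 * ioctl sequence handled above.  The paths, the container fd and the
 * BDF string are assumptions, and error handling is abbreviated.
 */
#if 0	/* user-space example code */
#include <fcntl.h>
#include <sys/ioctl.h>
#include <linux/vfio.h>

static int example_open_device(int container, const char *group_path,
			       const char *bdf)
{
	struct vfio_group_status status = { .argsz = sizeof(status) };
	int group = open(group_path, O_RDWR);	/* e.g. "/dev/vfio/26" */

	if (group < 0)
		return -1;

	ioctl(group, VFIO_GROUP_GET_STATUS, &status);
	if (!(status.flags & VFIO_GROUP_FLAGS_VIABLE))
		return -1;	/* not all group devices bound to vfio */

	/* Attach the group to an already-open container fd */
	ioctl(group, VFIO_GROUP_SET_CONTAINER, &container);

	/*
	 * The container is expected to get VFIO_SET_IOMMU before this;
	 * returns a new device fd, e.g. for "0000:06:0d.0".
	 */
	return ioctl(group, VFIO_GROUP_GET_DEVICE_FD, bdf);
}
#endif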
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1563) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1564) static int vfio_group_fops_open(struct inode *inode, struct file *filep)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1565) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1566) 	struct vfio_group *group;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1567) 	int opened;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1568) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1569) 	group = vfio_group_get_from_minor(iminor(inode));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1570) 	if (!group)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1571) 		return -ENODEV;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1572) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1573) 	if (group->noiommu && !capable(CAP_SYS_RAWIO)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1574) 		vfio_group_put(group);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1575) 		return -EPERM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1576) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1577) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1578) 	/* Do we need multiple instances of the group open?  Seems not. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1579) 	opened = atomic_cmpxchg(&group->opened, 0, 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1580) 	if (opened) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1581) 		vfio_group_put(group);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1582) 		return -EBUSY;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1583) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1584) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1585) 	/* Is something still in use from a previous open? */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1586) 	if (group->container) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1587) 		atomic_dec(&group->opened);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1588) 		vfio_group_put(group);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1589) 		return -EBUSY;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1590) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1591) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1592) 	/* Warn if the previous user didn't clean up; re-init to drop stale entries */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1593) 	if (WARN_ON(group->notifier.head))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1594) 		BLOCKING_INIT_NOTIFIER_HEAD(&group->notifier);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1595) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1596) 	filep->private_data = group;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1597) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1598) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1599) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1600) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1601) static int vfio_group_fops_release(struct inode *inode, struct file *filep)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1602) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1603) 	struct vfio_group *group = filep->private_data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1604) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1605) 	filep->private_data = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1606) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1607) 	vfio_group_try_dissolve_container(group);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1608) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1609) 	atomic_dec(&group->opened);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1610) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1611) 	vfio_group_put(group);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1612) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1613) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1614) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1615) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1616) static const struct file_operations vfio_group_fops = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1617) 	.owner		= THIS_MODULE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1618) 	.unlocked_ioctl	= vfio_group_fops_unl_ioctl,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1619) 	.compat_ioctl	= compat_ptr_ioctl,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1620) 	.open		= vfio_group_fops_open,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1621) 	.release	= vfio_group_fops_release,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1622) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1623) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1624) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1625)  * VFIO Device fd
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1626)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1627) static int vfio_device_fops_release(struct inode *inode, struct file *filep)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1628) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1629) 	struct vfio_device *device = filep->private_data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1630) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1631) 	device->ops->release(device->device_data);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1632) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1633) 	vfio_group_try_dissolve_container(device->group);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1634) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1635) 	vfio_device_put(device);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1636) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1637) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1638) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1639) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1640) static long vfio_device_fops_unl_ioctl(struct file *filep,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1641) 				       unsigned int cmd, unsigned long arg)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1642) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1643) 	struct vfio_device *device = filep->private_data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1644) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1645) 	if (unlikely(!device->ops->ioctl))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1646) 		return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1647) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1648) 	return device->ops->ioctl(device->device_data, cmd, arg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1649) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1650) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1651) static ssize_t vfio_device_fops_read(struct file *filep, char __user *buf,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1652) 				     size_t count, loff_t *ppos)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1653) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1654) 	struct vfio_device *device = filep->private_data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1655) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1656) 	if (unlikely(!device->ops->read))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1657) 		return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1658) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1659) 	return device->ops->read(device->device_data, buf, count, ppos);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1660) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1661) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1662) static ssize_t vfio_device_fops_write(struct file *filep,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1663) 				      const char __user *buf,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1664) 				      size_t count, loff_t *ppos)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1665) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1666) 	struct vfio_device *device = filep->private_data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1667) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1668) 	if (unlikely(!device->ops->write))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1669) 		return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1670) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1671) 	return device->ops->write(device->device_data, buf, count, ppos);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1672) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1673) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1674) static int vfio_device_fops_mmap(struct file *filep, struct vm_area_struct *vma)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1675) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1676) 	struct vfio_device *device = filep->private_data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1677) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1678) 	if (unlikely(!device->ops->mmap))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1679) 		return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1680) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1681) 	return device->ops->mmap(device->device_data, vma);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1682) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1683) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1684) static const struct file_operations vfio_device_fops = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1685) 	.owner		= THIS_MODULE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1686) 	.release	= vfio_device_fops_release,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1687) 	.read		= vfio_device_fops_read,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1688) 	.write		= vfio_device_fops_write,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1689) 	.unlocked_ioctl	= vfio_device_fops_unl_ioctl,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1690) 	.compat_ioctl	= compat_ptr_ioctl,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1691) 	.mmap		= vfio_device_fops_mmap,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1692) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1693) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1694) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1695)  * External user API: symbols exported for dynamically linked users such as KVM.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1696)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1697)  * The protocol includes:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1698)  *  1. do the normal VFIO init operations:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1699)  *	- opening a new container;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1700)  *	- attaching group(s) to it;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1701)  *	- setting an IOMMU driver for the container.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1702)  * When an IOMMU driver is set for a container, all groups in it are
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1703)  * considered ready to use by an external user.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1704)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1705)  * 2. User space passes a group fd to an external user.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1706)  * The external user calls vfio_group_get_external_user()
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1707)  * to verify that:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1708)  *	- the group is initialized;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1709)  *	- IOMMU is set for it.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1710)  * If both checks pass, vfio_group_get_external_user()
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1711)  * increments the container user counter to prevent
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1712)  * the VFIO group from disposal before KVM exits.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1713)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1714)  * 3. The external user calls vfio_external_user_iommu_id()
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1715)  * to obtain the IOMMU group ID.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1716)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1717)  * 4. When the external user (e.g. KVM) finishes, it calls
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1718)  * vfio_group_put_external_user() to release the VFIO group.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1719)  * This call decrements the container user counter.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1720)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1721) struct vfio_group *vfio_group_get_external_user(struct file *filep)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1722) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1723) 	struct vfio_group *group = filep->private_data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1724) 	int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1725) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1726) 	if (filep->f_op != &vfio_group_fops)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1727) 		return ERR_PTR(-EINVAL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1728) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1729) 	ret = vfio_group_add_container_user(group);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1730) 	if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1731) 		return ERR_PTR(ret);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1732) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1733) 	vfio_group_get(group);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1734) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1735) 	return group;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1736) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1737) EXPORT_SYMBOL_GPL(vfio_group_get_external_user);
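
/*
 * Illustrative sketch, not part of this driver: how an external module
 * in the KVM role might follow the protocol above.  The function name
 * and fd plumbing are assumptions for the example.
 */
#if 0	/* example external-user code */
static int example_use_group_fd(int group_fd)
{
	struct fd f = fdget(group_fd);
	struct vfio_group *group;
	int iommu_id;

	if (!f.file)
		return -EBADF;

	/* Step 2: verify the group and take a container user reference */
	group = vfio_group_get_external_user(f.file);
	fdput(f);
	if (IS_ERR(group))
		return PTR_ERR(group);

	/* Step 3: resolve the IOMMU group ID */
	iommu_id = vfio_external_user_iommu_id(group);

	/* ... use the group, tracked by iommu_id ... */

	/* Step 4: drop the references when finished */
	vfio_group_put_external_user(group);
	return iommu_id;
}
#endif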
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1738) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1739) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1740)  * External user API: symbols exported for dynamically linked users.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1741)  * The external user passes in a device pointer
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1742)  * to verify that:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1743)  *	- a VFIO group is associated with the device;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1744)  *	- IOMMU is set for the group.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1745)  * If both checks pass, vfio_group_get_external_user_from_dev()
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1746)  * increments the container user counter to prevent the VFIO group
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1747)  * from disposal before the external user exits, and returns the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1748)  * pointer to the VFIO group.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1749)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1750)  * When the external user finishes using the VFIO group, it calls
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1751)  * vfio_group_put_external_user() to release the VFIO group and
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1752)  * decrement the container user counter.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1753)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1754)  * @dev [in]	: device
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1755)  * Return ERR_PTR() on failure or a pointer to the VFIO group on success.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1756)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1758) struct vfio_group *vfio_group_get_external_user_from_dev(struct device *dev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1759) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1760) 	struct vfio_group *group;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1761) 	int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1762) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1763) 	group = vfio_group_get_from_dev(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1764) 	if (!group)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1765) 		return ERR_PTR(-ENODEV);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1766) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1767) 	ret = vfio_group_add_container_user(group);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1768) 	if (ret) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1769) 		vfio_group_put(group);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1770) 		return ERR_PTR(ret);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1771) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1772) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1773) 	return group;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1774) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1775) EXPORT_SYMBOL_GPL(vfio_group_get_external_user_from_dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1776) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1777) void vfio_group_put_external_user(struct vfio_group *group)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1778) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1779) 	vfio_group_try_dissolve_container(group);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1780) 	vfio_group_put(group);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1781) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1782) EXPORT_SYMBOL_GPL(vfio_group_put_external_user);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1783) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1784) bool vfio_external_group_match_file(struct vfio_group *test_group,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1785) 				    struct file *filep)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1786) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1787) 	struct vfio_group *group = filep->private_data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1788) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1789) 	return (filep->f_op == &vfio_group_fops) && (group == test_group);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1790) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1791) EXPORT_SYMBOL_GPL(vfio_external_group_match_file);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1792) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1793) int vfio_external_user_iommu_id(struct vfio_group *group)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1794) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1795) 	return iommu_group_id(group->iommu_group);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1796) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1797) EXPORT_SYMBOL_GPL(vfio_external_user_iommu_id);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1798) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1799) long vfio_external_check_extension(struct vfio_group *group, unsigned long arg)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1800) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1801) 	return vfio_ioctl_check_extension(group->container, arg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1802) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1803) EXPORT_SYMBOL_GPL(vfio_external_check_extension);
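
/*
 * Illustrative sketch: an external user probing whether the container
 * enforces DMA cache coherency (VFIO_DMA_CC_IOMMU), as a hypervisor
 * might before relaxing cache-flush emulation.  The helper name is an
 * assumption for the example.
 */
#if 0	/* example external-user code */
static bool example_container_is_coherent(struct vfio_group *group)
{
	return vfio_external_check_extension(group, VFIO_DMA_CC_IOMMU) > 0;
}
#endif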
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1804) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1805) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1806)  * Sub-module support
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1807)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1808) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1809)  * Helper for managing a buffer of info chain capabilities, allocate or
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1810)  * reallocate a buffer with additional @size, filling in @id and @version
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1811)  * of the capability.  A pointer to the new capability is returned.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1812)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1813)  * NB. The chain is based at the head of the buffer, so new entries are
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1814)  * added to the tail; vfio_info_cap_shift() should be called to fix up
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1815)  * the next offsets prior to copying to the user buffer.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1816)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1817) struct vfio_info_cap_header *vfio_info_cap_add(struct vfio_info_cap *caps,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1818) 					       size_t size, u16 id, u16 version)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1819) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1820) 	void *buf;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1821) 	struct vfio_info_cap_header *header, *tmp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1822) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1823) 	buf = krealloc(caps->buf, caps->size + size, GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1824) 	if (!buf) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1825) 		kfree(caps->buf);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1826) 		caps->size = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1827) 		return ERR_PTR(-ENOMEM);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1828) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1829) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1830) 	caps->buf = buf;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1831) 	header = buf + caps->size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1832) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1833) 	/* Eventually copied to the user buffer, so zero the new capability */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1834) 	memset(header, 0, size);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1835) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1836) 	header->id = id;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1837) 	header->version = version;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1838) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1839) 	/* Add to the end of the capability chain */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1840) 	for (tmp = buf; tmp->next; tmp = buf + tmp->next)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1841) 		; /* nothing */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1842) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1843) 	tmp->next = caps->size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1844) 	caps->size += size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1845) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1846) 	return header;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1847) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1848) EXPORT_SYMBOL_GPL(vfio_info_cap_add);
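
/*
 * Illustrative sketch of adding one capability with the helper above;
 * the struct layout and capability ID are assumptions for the example.
 */
#if 0	/* example caller code */
struct example_cap {
	struct vfio_info_cap_header header;
	__u64 value;
};

static int example_add_cap(struct vfio_info_cap *caps, __u64 value)
{
	struct vfio_info_cap_header *header;
	struct example_cap *cap;

	header = vfio_info_cap_add(caps, sizeof(*cap),
				   1 /* hypothetical cap ID */, 1);
	if (IS_ERR(header))
		return PTR_ERR(header);

	cap = container_of(header, struct example_cap, header);
	cap->value = value;
	return 0;
}
#endif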
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1849) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1850) void vfio_info_cap_shift(struct vfio_info_cap *caps, size_t offset)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1851) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1852) 	struct vfio_info_cap_header *tmp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1853) 	void *buf = (void *)caps->buf;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1854) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1855) 	for (tmp = buf; tmp->next; tmp = buf + tmp->next - offset)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1856) 		tmp->next += offset;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1857) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1858) EXPORT_SYMBOL(vfio_info_cap_shift);
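
/*
 * Illustrative sketch of the fixup described in the NB above: shift the
 * buffer-relative next offsets by the chain's offset within the user
 * struct, then copy out.  'arg' and 'cap_offset' are assumptions.
 */
#if 0	/* example caller code */
static int example_copy_caps(struct vfio_info_cap *caps,
			     void __user *arg, size_t cap_offset)
{
	int ret = 0;

	vfio_info_cap_shift(caps, cap_offset);

	if (copy_to_user(arg + cap_offset, caps->buf, caps->size))
		ret = -EFAULT;

	kfree(caps->buf);
	return ret;
}
#endif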
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1859) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1860) int vfio_info_add_capability(struct vfio_info_cap *caps,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1861) 			     struct vfio_info_cap_header *cap, size_t size)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1862) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1863) 	struct vfio_info_cap_header *header;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1864) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1865) 	header = vfio_info_cap_add(caps, size, cap->id, cap->version);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1866) 	if (IS_ERR(header))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1867) 		return PTR_ERR(header);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1868) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1869) 	memcpy(header + 1, cap + 1, size - sizeof(*header));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1870) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1871) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1872) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1873) EXPORT_SYMBOL(vfio_info_add_capability);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1874) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1875) int vfio_set_irqs_validate_and_prepare(struct vfio_irq_set *hdr, int num_irqs,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1876) 				       int max_irq_type, size_t *data_size)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1877) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1878) 	unsigned long minsz;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1879) 	size_t size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1880) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1881) 	minsz = offsetofend(struct vfio_irq_set, count);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1882) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1883) 	if ((hdr->argsz < minsz) || (hdr->index >= max_irq_type) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1884) 	    (hdr->count >= (U32_MAX - hdr->start)) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1885) 	    (hdr->flags & ~(VFIO_IRQ_SET_DATA_TYPE_MASK |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1886) 				VFIO_IRQ_SET_ACTION_TYPE_MASK)))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1887) 		return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1888) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1889) 	if (data_size)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1890) 		*data_size = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1891) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1892) 	if (hdr->start >= num_irqs || hdr->start + hdr->count > num_irqs)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1893) 		return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1894) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1895) 	switch (hdr->flags & VFIO_IRQ_SET_DATA_TYPE_MASK) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1896) 	case VFIO_IRQ_SET_DATA_NONE:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1897) 		size = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1898) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1899) 	case VFIO_IRQ_SET_DATA_BOOL:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1900) 		size = sizeof(uint8_t);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1901) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1902) 	case VFIO_IRQ_SET_DATA_EVENTFD:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1903) 		size = sizeof(int32_t);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1904) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1905) 	default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1906) 		return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1907) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1908) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1909) 	if (size) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1910) 		if (hdr->argsz - minsz < hdr->count * size)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1911) 			return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1912) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1913) 		if (!data_size)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1914) 			return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1915) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1916) 		*data_size = hdr->count * size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1917) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1918) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1919) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1920) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1921) EXPORT_SYMBOL(vfio_set_irqs_validate_and_prepare);
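
/*
 * Illustrative sketch of the VFIO_DEVICE_SET_IRQS flow a vendor driver
 * might build on the helper above; 'num_irqs' and the trailing-data
 * handling are assumptions for the example.
 */
#if 0	/* example caller code */
static long example_set_irqs_ioctl(unsigned long arg, int num_irqs)
{
	unsigned long minsz = offsetofend(struct vfio_irq_set, count);
	struct vfio_irq_set hdr;
	size_t data_size = 0;
	u8 *data = NULL;
	int ret;

	if (copy_from_user(&hdr, (void __user *)arg, minsz))
		return -EFAULT;

	ret = vfio_set_irqs_validate_and_prepare(&hdr, num_irqs,
						 VFIO_PCI_NUM_IRQS,
						 &data_size);
	if (ret)
		return ret;

	if (data_size) {
		data = memdup_user((void __user *)(arg + minsz), data_size);
		if (IS_ERR(data))
			return PTR_ERR(data);
	}

	/* ... program the interrupts described by hdr using data ... */

	kfree(data);
	return 0;
}
#endif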
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1922) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1923) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1924)  * Pin a set of guest PFNs and return their associated host PFNs for local
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1925)  * domain only.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1926)  * @dev [in]     : device
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1927)  * @user_pfn [in]: array of user/guest PFNs to be pinned.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1928)  * @npage [in]   : count of elements in user_pfn array.  This count should not
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1929)  *		   be greater than VFIO_PIN_PAGES_MAX_ENTRIES.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1930)  * @prot [in]    : protection flags
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1931)  * @phys_pfn[out]: array of host PFNs
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1932)  * Return error or number of pages pinned.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1933)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1934) int vfio_pin_pages(struct device *dev, unsigned long *user_pfn, int npage,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1935) 		   int prot, unsigned long *phys_pfn)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1936) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1937) 	struct vfio_container *container;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1938) 	struct vfio_group *group;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1939) 	struct vfio_iommu_driver *driver;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1940) 	int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1941) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1942) 	if (!dev || !user_pfn || !phys_pfn || !npage)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1943) 		return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1944) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1945) 	if (npage > VFIO_PIN_PAGES_MAX_ENTRIES)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1946) 		return -E2BIG;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1947) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1948) 	group = vfio_group_get_from_dev(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1949) 	if (!group)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1950) 		return -ENODEV;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1951) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1952) 	if (group->dev_counter > 1) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1953) 		ret = -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1954) 		goto err_pin_pages;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1955) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1956) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1957) 	ret = vfio_group_add_container_user(group);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1958) 	if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1959) 		goto err_pin_pages;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1960) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1961) 	container = group->container;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1962) 	driver = container->iommu_driver;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1963) 	if (likely(driver && driver->ops->pin_pages))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1964) 		ret = driver->ops->pin_pages(container->iommu_data,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1965) 					     group->iommu_group, user_pfn,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1966) 					     npage, prot, phys_pfn);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1967) 	else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1968) 		ret = -ENOTTY;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1969) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1970) 	vfio_group_try_dissolve_container(group);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1971) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1972) err_pin_pages:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1973) 	vfio_group_put(group);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1974) 	return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1975) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1976) EXPORT_SYMBOL(vfio_pin_pages);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1977) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1978) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1979)  * Unpin set of host PFNs for local domain only.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1980)  * @dev [in]     : device
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1981)  * @user_pfn [in]: array of user/guest PFNs to be unpinned.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1983)  * @npage [in]   : count of elements in user_pfn array.  This count should not
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1984)  *		   be greater than VFIO_PIN_PAGES_MAX_ENTRIES.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1985)  * Return error or number of pages unpinned.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1986)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1987) int vfio_unpin_pages(struct device *dev, unsigned long *user_pfn, int npage)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1988) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1989) 	struct vfio_container *container;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1990) 	struct vfio_group *group;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1991) 	struct vfio_iommu_driver *driver;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1992) 	int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1993) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1994) 	if (!dev || !user_pfn || !npage)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1995) 		return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1996) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1997) 	if (npage > VFIO_PIN_PAGES_MAX_ENTRIES)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1998) 		return -E2BIG;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1999) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2000) 	group = vfio_group_get_from_dev(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2001) 	if (!group)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2002) 		return -ENODEV;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2003) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2004) 	ret = vfio_group_add_container_user(group);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2005) 	if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2006) 		goto err_unpin_pages;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2007) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2008) 	container = group->container;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2009) 	driver = container->iommu_driver;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2010) 	if (likely(driver && driver->ops->unpin_pages))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2011) 		ret = driver->ops->unpin_pages(container->iommu_data, user_pfn,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2012) 					       npage);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2013) 	else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2014) 		ret = -ENOTTY;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2015) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2016) 	vfio_group_try_dissolve_container(group);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2017) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2018) err_unpin_pages:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2019) 	vfio_group_put(group);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2020) 	return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2021) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2022) EXPORT_SYMBOL(vfio_unpin_pages);
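
/*
 * Illustrative sketch pairing vfio_pin_pages() and vfio_unpin_pages()
 * as a mediated-device vendor driver might; the single-page helper and
 * its name are assumptions for the example.
 */
#if 0	/* example vendor-driver code */
static int example_with_pinned_page(struct device *dev, unsigned long gfn)
{
	unsigned long hpfn;
	int ret;

	ret = vfio_pin_pages(dev, &gfn, 1, IOMMU_READ | IOMMU_WRITE, &hpfn);
	if (ret != 1)
		return ret < 0 ? ret : -EFAULT;

	/* ... access the host page at PFN hpfn ... */

	if (vfio_unpin_pages(dev, &gfn, 1) != 1)
		return -EFAULT;
	return 0;
}
#endif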
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2023) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2024) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2025)  * Pin a set of guest IOVA PFNs and return their associated host PFNs for a
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2026)  * VFIO group.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2027)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2028)  * The caller needs to call vfio_group_get_external_user() or
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2029)  * vfio_group_get_external_user_from_dev() prior to calling this interface,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2030)  * so as to prevent the VFIO group from disposal in the middle of the call.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2031)  * The caller may, however, keep the reference to the VFIO group across
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2032)  * several calls into this interface.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2033)  * After it has finished using the VFIO group, the caller needs to release
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2034)  * the VFIO group by calling vfio_group_put_external_user().
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2035)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2036)  * @group [in]		: VFIO group
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2037)  * @user_iova_pfn [in]	: array of user/guest IOVA PFNs to be pinned.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2038)  * @npage [in]		: count of elements in user_iova_pfn array.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2039)  *			  This count should not be greater than
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2040)  *			  VFIO_PIN_PAGES_MAX_ENTRIES.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2041)  * @prot [in]		: protection flags
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2042)  * @phys_pfn [out]	: array of host PFNs
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2043)  * Return error or number of pages pinned.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2044)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2045) int vfio_group_pin_pages(struct vfio_group *group,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2046) 			 unsigned long *user_iova_pfn, int npage,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2047) 			 int prot, unsigned long *phys_pfn)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2048) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2049) 	struct vfio_container *container;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2050) 	struct vfio_iommu_driver *driver;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2051) 	int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2052) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2053) 	if (!group || !user_iova_pfn || !phys_pfn || !npage)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2054) 		return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2055) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2056) 	if (group->dev_counter > 1)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2057) 		return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2058) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2059) 	if (npage > VFIO_PIN_PAGES_MAX_ENTRIES)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2060) 		return -E2BIG;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2061) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2062) 	container = group->container;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2063) 	driver = container->iommu_driver;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2064) 	if (likely(driver && driver->ops->pin_pages))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2065) 		ret = driver->ops->pin_pages(container->iommu_data,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2066) 					     group->iommu_group, user_iova_pfn,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2067) 					     npage, prot, phys_pfn);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2068) 	else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2069) 		ret = -ENOTTY;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2070) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2071) 	return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2072) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2073) EXPORT_SYMBOL(vfio_group_pin_pages);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2074) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2075) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2076)  * Unpin a set of guest IOVA PFNs for a VFIO group.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2077)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2078)  * The caller needs to call vfio_group_get_external_user() or
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2079)  * vfio_group_get_external_user_from_dev() prior to calling this interface,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2080)  * so as to prevent the VFIO group from disposal in the middle of the call.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2081)  * The caller may, however, keep the reference to the VFIO group across
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2082)  * several calls into this interface.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2083)  * After it has finished using the VFIO group, the caller needs to release
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2084)  * the VFIO group by calling vfio_group_put_external_user().
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2085)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2086)  * @group [in]		: vfio group
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2087)  * @user_iova_pfn [in]	: array of user/guest IOVA PFNs to be unpinned.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2088)  * @npage [in]		: count of elements in user_iova_pfn array.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2089)  *			  This count should not be greater than
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2090)  *			  VFIO_PIN_PAGES_MAX_ENTRIES.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2091)  * Return error or number of pages unpinned.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2092)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2093) int vfio_group_unpin_pages(struct vfio_group *group,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2094) 			   unsigned long *user_iova_pfn, int npage)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2095) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2096) 	struct vfio_container *container;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2097) 	struct vfio_iommu_driver *driver;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2098) 	int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2099) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2100) 	if (!group || !user_iova_pfn || !npage)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2101) 		return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2102) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2103) 	if (npage > VFIO_PIN_PAGES_MAX_ENTRIES)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2104) 		return -E2BIG;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2105) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2106) 	container = group->container;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2107) 	driver = container->iommu_driver;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2108) 	if (likely(driver && driver->ops->unpin_pages))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2109) 		ret = driver->ops->unpin_pages(container->iommu_data,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2110) 					       user_iova_pfn, npage);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2111) 	else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2112) 		ret = -ENOTTY;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2113) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2114) 	return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2115) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2116) EXPORT_SYMBOL(vfio_group_unpin_pages);
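
/*
 * Illustrative sketch of the group-based variants, for callers holding
 * an external-user group reference rather than a struct device; the
 * helper name is an assumption for the example.
 */
#if 0	/* example external-user code */
static int example_group_pin_one(struct vfio_group *group,
				 unsigned long iova_pfn, unsigned long *hpfn)
{
	int ret;

	ret = vfio_group_pin_pages(group, &iova_pfn, 1,
				   IOMMU_READ | IOMMU_WRITE, hpfn);
	if (ret != 1)
		return ret < 0 ? ret : -EFAULT;

	/* ... use *hpfn, then drop the pin ... */

	if (vfio_group_unpin_pages(group, &iova_pfn, 1) != 1)
		return -EFAULT;
	return 0;
}
#endif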
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2117) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2119) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2120)  * This interface allows the CPUs to perform a form of virtual DMA on
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2121)  * behalf of the device.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2122)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2123)  * The CPUs copy data between a kernel buffer and a range of IOVAs
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2124)  * that point into user space memory.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2125)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2126)  * As the read/write of user space memory is conducted via the CPUs and is
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2127)  * not a real device DMA, it is not necessary to pin the user space memory.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2128)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2129)  * The caller needs to call vfio_group_get_external_user() or
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2130)  * vfio_group_get_external_user_from_dev() prior to calling this interface,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2131)  * so as to prevent the VFIO group from disposal in the middle of the call.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2132)  * The caller may, however, keep the reference to the VFIO group across
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2133)  * several calls into this interface.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2134)  * After it has finished using the VFIO group, the caller needs to release
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2135)  * the VFIO group by calling vfio_group_put_external_user().
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2136)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2137)  * @group [in]		: VFIO group
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2138)  * @user_iova [in]	: base IOVA of a user space buffer
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2139)  * @data [in]		: pointer to kernel buffer
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2140)  * @len [in]		: kernel buffer length
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2141)  * @write [in]	: true writes @data to the user IOVA, false reads into @data
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2142)  * Return error code on failure or 0 on success.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2143)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2144) int vfio_dma_rw(struct vfio_group *group, dma_addr_t user_iova,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2145) 		void *data, size_t len, bool write)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2146) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2147) 	struct vfio_container *container;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2148) 	struct vfio_iommu_driver *driver;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2149) 	int ret = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2150) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2151) 	if (!group || !data || len <= 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2152) 		return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2153) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2154) 	container = group->container;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2155) 	driver = container->iommu_driver;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2156) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2157) 	if (likely(driver && driver->ops->dma_rw))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2158) 		ret = driver->ops->dma_rw(container->iommu_data,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2159) 					  user_iova, data, len, write);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2160) 	else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2161) 		ret = -ENOTTY;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2162) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2163) 	return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2164) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2165) EXPORT_SYMBOL(vfio_dma_rw);
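
/*
 * Illustrative sketch: reading a small guest descriptor through the
 * interface above without pinning; 'struct example_desc' is an
 * assumption for the example.
 */
#if 0	/* example external-user code */
struct example_desc {
	__le64 addr;
	__le32 len;
	__le32 flags;
};

static int example_read_desc(struct vfio_group *group, dma_addr_t iova,
			     struct example_desc *desc)
{
	/* write == false: copy from the guest IOVA into the kernel buffer */
	return vfio_dma_rw(group, iova, desc, sizeof(*desc), false);
}
#endif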
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2166) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2167) static int vfio_register_iommu_notifier(struct vfio_group *group,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2168) 					unsigned long *events,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2169) 					struct notifier_block *nb)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2170) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2171) 	struct vfio_container *container;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2172) 	struct vfio_iommu_driver *driver;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2173) 	int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2174) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2175) 	ret = vfio_group_add_container_user(group);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2176) 	if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2177) 		return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2178) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2179) 	container = group->container;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2180) 	driver = container->iommu_driver;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2181) 	if (likely(driver && driver->ops->register_notifier))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2182) 		ret = driver->ops->register_notifier(container->iommu_data,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2183) 						     events, nb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2184) 	else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2185) 		ret = -ENOTTY;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2186) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2187) 	vfio_group_try_dissolve_container(group);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2188) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2189) 	return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2190) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2191) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2192) static int vfio_unregister_iommu_notifier(struct vfio_group *group,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2193) 					  struct notifier_block *nb)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2194) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2195) 	struct vfio_container *container;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2196) 	struct vfio_iommu_driver *driver;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2197) 	int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2198) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2199) 	ret = vfio_group_add_container_user(group);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2200) 	if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2201) 		return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2202) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2203) 	container = group->container;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2204) 	driver = container->iommu_driver;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2205) 	if (likely(driver && driver->ops->unregister_notifier))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2206) 		ret = driver->ops->unregister_notifier(container->iommu_data,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2207) 						       nb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2208) 	else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2209) 		ret = -ENOTTY;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2210) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2211) 	vfio_group_try_dissolve_container(group);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2212) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2213) 	return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2214) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2215) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2216) void vfio_group_set_kvm(struct vfio_group *group, struct kvm *kvm)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2217) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2218) 	group->kvm = kvm;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2219) 	blocking_notifier_call_chain(&group->notifier,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2220) 				VFIO_GROUP_NOTIFY_SET_KVM, kvm);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2221) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2222) EXPORT_SYMBOL_GPL(vfio_group_set_kvm);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2223) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2224) static int vfio_register_group_notifier(struct vfio_group *group,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2225) 					unsigned long *events,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2226) 					struct notifier_block *nb)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2227) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2228) 	int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2229) 	bool set_kvm = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2230) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2231) 	if (*events & VFIO_GROUP_NOTIFY_SET_KVM)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2232) 		set_kvm = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2233) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2234) 	/* clear known events */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2235) 	*events &= ~VFIO_GROUP_NOTIFY_SET_KVM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2236) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2237) 	/* refuse to continue if any unknown events remain */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2238) 	if (*events)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2239) 		return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2240) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2241) 	ret = vfio_group_add_container_user(group);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2242) 	if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2243) 		return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2244) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2245) 	ret = blocking_notifier_chain_register(&group->notifier, nb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2246) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2247) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2248) 	 * The attach of kvm to the vfio_group may already have happened,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2249) 	 * so replay it once upon registration.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2250) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2251) 	if (!ret && set_kvm && group->kvm)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2252) 		blocking_notifier_call_chain(&group->notifier,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2253) 					VFIO_GROUP_NOTIFY_SET_KVM, group->kvm);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2254) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2255) 	vfio_group_try_dissolve_container(group);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2256) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2257) 	return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2258) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2259) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2260) static int vfio_unregister_group_notifier(struct vfio_group *group,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2261) 					 struct notifier_block *nb)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2262) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2263) 	int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2264) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2265) 	ret = vfio_group_add_container_user(group);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2266) 	if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2267) 		return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2268) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2269) 	ret = blocking_notifier_chain_unregister(&group->notifier, nb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2270) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2271) 	vfio_group_try_dissolve_container(group);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2272) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2273) 	return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2274) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2275) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2276) int vfio_register_notifier(struct device *dev, enum vfio_notify_type type,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2277) 			   unsigned long *events, struct notifier_block *nb)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2278) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2279) 	struct vfio_group *group;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2280) 	int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2281) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2282) 	if (!dev || !nb || !events || (*events == 0))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2283) 		return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2284) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2285) 	group = vfio_group_get_from_dev(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2286) 	if (!group)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2287) 		return -ENODEV;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2288) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2289) 	switch (type) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2290) 	case VFIO_IOMMU_NOTIFY:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2291) 		ret = vfio_register_iommu_notifier(group, events, nb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2292) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2293) 	case VFIO_GROUP_NOTIFY:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2294) 		ret = vfio_register_group_notifier(group, events, nb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2295) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2296) 	default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2297) 		ret = -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2298) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2299) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2300) 	vfio_group_put(group);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2301) 	return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2302) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2303) EXPORT_SYMBOL(vfio_register_notifier);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2304) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2305) int vfio_unregister_notifier(struct device *dev, enum vfio_notify_type type,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2306) 			     struct notifier_block *nb)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2307) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2308) 	struct vfio_group *group;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2309) 	int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2310) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2311) 	if (!dev || !nb)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2312) 		return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2313) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2314) 	group = vfio_group_get_from_dev(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2315) 	if (!group)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2316) 		return -ENODEV;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2317) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2318) 	switch (type) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2319) 	case VFIO_IOMMU_NOTIFY:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2320) 		ret = vfio_unregister_iommu_notifier(group, nb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2321) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2322) 	case VFIO_GROUP_NOTIFY:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2323) 		ret = vfio_unregister_group_notifier(group, nb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2324) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2325) 	default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2326) 		ret = -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2327) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2328) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2329) 	vfio_group_put(group);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2330) 	return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2331) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2332) EXPORT_SYMBOL(vfio_unregister_notifier);
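
/*
 * Illustrative sketch: a vendor driver subscribing to the KVM
 * association event dispatched by vfio_group_set_kvm() above; the
 * static example_kvm cache is an assumption for the example.
 */
#if 0	/* example vendor-driver code */
static struct kvm *example_kvm;

static int example_nb_cb(struct notifier_block *nb, unsigned long action,
			 void *data)
{
	/* data is the new struct kvm pointer, or NULL when cleared */
	if (action == VFIO_GROUP_NOTIFY_SET_KVM)
		example_kvm = data;

	return NOTIFY_OK;
}

static int example_register(struct device *dev, struct notifier_block *nb)
{
	unsigned long events = VFIO_GROUP_NOTIFY_SET_KVM;

	nb->notifier_call = example_nb_cb;
	return vfio_register_notifier(dev, VFIO_GROUP_NOTIFY, &events, nb);
}
#endif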
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2333) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2334) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2335)  * Module/class support
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2336)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2337) static char *vfio_devnode(struct device *dev, umode_t *mode)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2338) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2339) 	return kasprintf(GFP_KERNEL, "vfio/%s", dev_name(dev));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2340) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2341) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2342) static struct miscdevice vfio_dev = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2343) 	.minor = VFIO_MINOR,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2344) 	.name = "vfio",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2345) 	.fops = &vfio_fops,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2346) 	.nodename = "vfio/vfio",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2347) 	.mode = S_IRUGO | S_IWUGO,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2348) };
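/*
 * The container node is deliberately world read/write: opening
 * /dev/vfio/vfio grants nothing by itself.  Real privilege comes from
 * access to the per-group /dev/vfio/$GROUP nodes, whose ownership and
 * permissions the administrator controls.
 */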
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2349) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2350) static int __init vfio_init(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2351) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2352) 	int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2353) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2354) 	idr_init(&vfio.group_idr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2355) 	mutex_init(&vfio.group_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2356) 	mutex_init(&vfio.iommu_drivers_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2357) 	INIT_LIST_HEAD(&vfio.group_list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2358) 	INIT_LIST_HEAD(&vfio.iommu_drivers_list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2359) 	init_waitqueue_head(&vfio.release_q);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2360) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2361) 	ret = misc_register(&vfio_dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2362) 	if (ret) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2363) 		pr_err("vfio: misc device register failed\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2364) 		return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2365) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2366) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2367) 	/* /dev/vfio/$GROUP */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2368) 	vfio.class = class_create(THIS_MODULE, "vfio");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2369) 	if (IS_ERR(vfio.class)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2370) 		ret = PTR_ERR(vfio.class);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2371) 		goto err_class;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2372) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2373) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2374) 	vfio.class->devnode = vfio_devnode;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2375) 
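	/*
	 * Reserve a full dynamic major worth of minors (MINORMASK + 1)
	 * for group devices; each vfio_group is assigned one minor from
	 * this region and the single cdev below serves them all through
	 * vfio_group_fops.
	 */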
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2376) 	ret = alloc_chrdev_region(&vfio.group_devt, 0, MINORMASK + 1, "vfio");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2377) 	if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2378) 		goto err_alloc_chrdev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2379) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2380) 	cdev_init(&vfio.group_cdev, &vfio_group_fops);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2381) 	ret = cdev_add(&vfio.group_cdev, vfio.group_devt, MINORMASK + 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2382) 	if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2383) 		goto err_cdev_add;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2384) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2385) 	pr_info(DRIVER_DESC " version: " DRIVER_VERSION "\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2386) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2387) #ifdef CONFIG_VFIO_NOIOMMU
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2388) 	vfio_register_iommu_driver(&vfio_noiommu_ops);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2389) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2390) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2391) 
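	/* Unwind in the reverse order of the setup steps above. */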
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2392) err_cdev_add:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2393) 	unregister_chrdev_region(vfio.group_devt, MINORMASK + 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2394) err_alloc_chrdev:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2395) 	class_destroy(vfio.class);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2396) 	vfio.class = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2397) err_class:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2398) 	misc_deregister(&vfio_dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2399) 	return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2400) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2401) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2402) static void __exit vfio_cleanup(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2403) {
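	/*
	 * Bus drivers must have released all groups by now; a non-empty
	 * group list here means a vfio_group reference was leaked.
	 */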
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2404) 	WARN_ON(!list_empty(&vfio.group_list));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2405) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2406) #ifdef CONFIG_VFIO_NOIOMMU
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2407) 	vfio_unregister_iommu_driver(&vfio_noiommu_ops);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2408) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2409) 	idr_destroy(&vfio.group_idr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2410) 	cdev_del(&vfio.group_cdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2411) 	unregister_chrdev_region(vfio.group_devt, MINORMASK + 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2412) 	class_destroy(vfio.class);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2413) 	vfio.class = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2414) 	misc_deregister(&vfio_dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2415) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2416) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2417) module_init(vfio_init);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2418) module_exit(vfio_cleanup);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2419) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2420) MODULE_VERSION(DRIVER_VERSION);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2421) MODULE_LICENSE("GPL v2");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2422) MODULE_AUTHOR(DRIVER_AUTHOR);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2423) MODULE_DESCRIPTION(DRIVER_DESC);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2424) MODULE_ALIAS_MISCDEV(VFIO_MINOR);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2425) MODULE_ALIAS("devname:vfio/vfio");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2426) MODULE_SOFTDEP("post: vfio_iommu_type1 vfio_iommu_spapr_tce");
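/*
 * The softdep is a hint to userspace module loading: pull in the IOMMU
 * backend modules after the core, since a container cannot complete
 * VFIO_SET_IOMMU until a backend (vfio_iommu_type1 on most platforms,
 * the sPAPR TCE backend on POWER) has registered itself.
 */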