Orange Pi 5 kernel

Deprecated Linux kernel 5.10.110 for OrangePi 5/5B/5+ boards

3 Commits   0 Branches   0 Tags
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300    1) // SPDX-License-Identifier: GPL-2.0-or-later
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300    2) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300    3)  * Virtio vhost-user driver
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300    4)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300    5)  * Copyright(c) 2019 Intel Corporation
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300    6)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300    7)  * This driver allows virtio devices to be used over a vhost-user socket.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300    8)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300    9)  * Guest devices can be instantiated by kernel module or command line
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   10)  * parameters. One device will be created for each parameter. Syntax:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   11)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   12)  *		virtio_uml.device=<socket>:<virtio_id>[:<platform_id>]
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   13)  * where:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   14)  *		<socket>	:= vhost-user socket path to connect
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   15)  *		<virtio_id>	:= virtio device id (as in virtio_ids.h)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   16)  *		<platform_id>	:= (optional) platform device id
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   17)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   18)  * example:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   19)  *		virtio_uml.device=/var/uml.socket:1
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   20)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   21)  * Based on Virtio MMIO driver by Pawel Moll, copyright 2011-2014, ARM Ltd.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   22)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   23) #include <linux/module.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   24) #include <linux/platform_device.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   25) #include <linux/slab.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   26) #include <linux/virtio.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   27) #include <linux/virtio_config.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   28) #include <linux/virtio_ring.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   29) #include <linux/time-internal.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   30) #include <shared/as-layout.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   31) #include <irq_kern.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   32) #include <init.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   33) #include <os.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   34) #include "vhost_user.h"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   35) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   36) /* Workaround due to a conflict between irq_user.h and irqreturn.h */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   37) #ifdef IRQ_NONE
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   38) #undef IRQ_NONE
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   39) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   40) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   41) #define MAX_SUPPORTED_QUEUE_SIZE	256
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   42) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   43) #define to_virtio_uml_device(_vdev) \
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   44) 	container_of(_vdev, struct virtio_uml_device, vdev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   45) 
/*
 * Per-device configuration parsed from the virtio_uml.device parameters;
 * attached to the platform device as its platform_data.
 */
struct virtio_uml_platform_data {
	u32 virtio_device_id;		/* virtio device ID (as in virtio_ids.h) */
	const char *socket_path;	/* vhost-user socket path to connect to */
	struct work_struct conn_broken_wk; /* scheduled when the socket connection breaks */
	struct platform_device *pdev;	/* back-pointer to the owning platform device */
};
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   52) 
/* Driver state for one vhost-user backed virtio device. */
struct virtio_uml_device {
	struct virtio_device vdev;	/* embedded virtio device (container_of anchor) */
	struct platform_device *pdev;	/* platform device we were probed from */

	spinlock_t sock_lock;		/* serializes send + ACK on the main socket */
	int sock, req_fd;		/* vhost-user socket; slave request pipe read end */
	u64 features;			/* device features read from the back-end */
	u64 protocol_features;		/* negotiated vhost-user protocol features */
	u8 status;			/* cached virtio status byte */
	u8 registered:1;		/* set once the virtio device is registered */
};
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   64) 
/* Per-virtqueue bookkeeping (stored in vq->priv). */
struct virtio_uml_vq_info {
	int kick_fd, call_fd;	/* eventfd-style kick (guest->host) and call (host->guest) fds */
	char name[32];		/* irq/debug name for this queue */
#ifdef CONFIG_UML_TIME_TRAVEL_SUPPORT
	/* used to defer the call-interrupt into simulation time */
	struct virtqueue *vq;
	vq_callback_t *callback;
	struct time_travel_event defer;
#endif
};
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   74) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   75) extern unsigned long long physmem_size, highmem;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   76) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   77) #define vu_err(vu_dev, ...)	dev_err(&(vu_dev)->pdev->dev, ##__VA_ARGS__)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   78) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   79) /* Vhost-user protocol */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   80) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   81) static int full_sendmsg_fds(int fd, const void *buf, unsigned int len,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   82) 			    const int *fds, unsigned int fds_num)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   83) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   84) 	int rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   85) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   86) 	do {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   87) 		rc = os_sendmsg_fds(fd, buf, len, fds, fds_num);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   88) 		if (rc > 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   89) 			buf += rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   90) 			len -= rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   91) 			fds = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   92) 			fds_num = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   93) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   94) 	} while (len && (rc >= 0 || rc == -EINTR));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   95) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   96) 	if (rc < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   97) 		return rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   98) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   99) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  100) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  101) static int full_read(int fd, void *buf, int len, bool abortable)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  102) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  103) 	int rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  104) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  105) 	do {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  106) 		rc = os_read_file(fd, buf, len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  107) 		if (rc > 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  108) 			buf += rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  109) 			len -= rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  110) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  111) 	} while (len && (rc > 0 || rc == -EINTR || (!abortable && rc == -EAGAIN)));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  112) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  113) 	if (rc < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  114) 		return rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  115) 	if (rc == 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  116) 		return -ECONNRESET;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  117) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  118) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  119) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  120) static int vhost_user_recv_header(int fd, struct vhost_user_msg *msg)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  121) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  122) 	return full_read(fd, msg, sizeof(msg->header), true);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  123) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  124) 
/*
 * Receive one vhost-user message (header + payload) from @fd.
 *
 * On -ECONNRESET with a registered device, the connection is considered
 * broken: the virtio device is marked broken and the conn_broken work is
 * scheduled before the error is returned to the caller.
 *
 * Returns 0 on success, -EPROTO if the advertised payload exceeds
 * @max_payload_size, or a negative error from the underlying reads.
 */
static int vhost_user_recv(struct virtio_uml_device *vu_dev,
			   int fd, struct vhost_user_msg *msg,
			   size_t max_payload_size, bool wait)
{
	size_t size;
	int rc;

	/*
	 * In virtio time-travel mode, we're handling all the vhost-user
	 * FDs by polling them whenever appropriate. However, we may get
	 * into a situation where we're sending out an interrupt message
	 * to a device (e.g. a net device) and need to handle a simulation
	 * time message while doing so, e.g. one that tells us to update
	 * our idea of how long we can run without scheduling.
	 *
	 * Thus, we need to not just read() from the given fd, but need
	 * to also handle messages for the simulation time - this function
	 * does that for us while waiting for the given fd to be readable.
	 */
	if (wait)
		time_travel_wait_readable(fd);

	rc = vhost_user_recv_header(fd, msg);

	/* EOF on the socket means the back-end went away. */
	if (rc == -ECONNRESET && vu_dev->registered) {
		struct virtio_uml_platform_data *pdata;

		pdata = vu_dev->pdev->dev.platform_data;

		virtio_break_device(&vu_dev->vdev);
		schedule_work(&pdata->conn_broken_wk);
	}
	if (rc)
		return rc;
	/* Payload length is taken from the header we just read. */
	size = msg->header.size;
	if (size > max_payload_size)
		return -EPROTO;
	return full_read(fd, &msg->payload, size, false);
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  164) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  165) static int vhost_user_recv_resp(struct virtio_uml_device *vu_dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  166) 				struct vhost_user_msg *msg,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  167) 				size_t max_payload_size)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  168) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  169) 	int rc = vhost_user_recv(vu_dev, vu_dev->sock, msg,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  170) 				 max_payload_size, true);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  171) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  172) 	if (rc)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  173) 		return rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  174) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  175) 	if (msg->header.flags != (VHOST_USER_FLAG_REPLY | VHOST_USER_VERSION))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  176) 		return -EPROTO;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  177) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  178) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  179) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  180) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  181) static int vhost_user_recv_u64(struct virtio_uml_device *vu_dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  182) 			       u64 *value)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  183) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  184) 	struct vhost_user_msg msg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  185) 	int rc = vhost_user_recv_resp(vu_dev, &msg,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  186) 				      sizeof(msg.payload.integer));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  187) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  188) 	if (rc)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  189) 		return rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  190) 	if (msg.header.size != sizeof(msg.payload.integer))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  191) 		return -EPROTO;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  192) 	*value = msg.payload.integer;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  193) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  194) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  195) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  196) static int vhost_user_recv_req(struct virtio_uml_device *vu_dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  197) 			       struct vhost_user_msg *msg,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  198) 			       size_t max_payload_size)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  199) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  200) 	int rc = vhost_user_recv(vu_dev, vu_dev->req_fd, msg,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  201) 				 max_payload_size, false);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  202) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  203) 	if (rc)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  204) 		return rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  205) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  206) 	if ((msg->header.flags & ~VHOST_USER_FLAG_NEED_REPLY) !=
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  207) 			VHOST_USER_VERSION)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  208) 		return -EPROTO;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  209) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  210) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  211) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  212) 
/*
 * Send @msg (with any @fds as ancillary data) on the main vhost-user
 * socket.  Unless the caller already expects a response (@need_response),
 * and the back-end supports REPLY_ACK, an ACK is requested and consumed
 * here; a non-zero ACK status is turned into -EIO.
 *
 * The sock_lock serializes the send and the matching ACK so replies from
 * concurrent callers cannot interleave.
 */
static int vhost_user_send(struct virtio_uml_device *vu_dev,
			   bool need_response, struct vhost_user_msg *msg,
			   int *fds, size_t num_fds)
{
	size_t size = sizeof(msg->header) + msg->header.size;
	unsigned long flags;
	bool request_ack;
	int rc;

	msg->header.flags |= VHOST_USER_VERSION;

	/*
	 * The need_response flag indicates that we already need a response,
	 * e.g. to read the features. In these cases, don't request an ACK as
	 * it is meaningless. Also request an ACK only if supported.
	 */
	request_ack = !need_response;
	if (!(vu_dev->protocol_features &
			BIT_ULL(VHOST_USER_PROTOCOL_F_REPLY_ACK)))
		request_ack = false;

	if (request_ack)
		msg->header.flags |= VHOST_USER_FLAG_NEED_REPLY;

	spin_lock_irqsave(&vu_dev->sock_lock, flags);
	rc = full_sendmsg_fds(vu_dev->sock, msg, size, fds, num_fds);
	if (rc < 0)
		goto out;

	if (request_ack) {
		uint64_t status;

		/* ACK payload is 0 on success, non-zero on back-end failure. */
		rc = vhost_user_recv_u64(vu_dev, &status);
		if (rc)
			goto out;

		if (status) {
			vu_err(vu_dev, "slave reports error: %llu\n", status);
			rc = -EIO;
			goto out;
		}
	}

out:
	spin_unlock_irqrestore(&vu_dev->sock_lock, flags);
	return rc;
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  260) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  261) static int vhost_user_send_no_payload(struct virtio_uml_device *vu_dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  262) 				      bool need_response, u32 request)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  263) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  264) 	struct vhost_user_msg msg = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  265) 		.header.request = request,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  266) 	};
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  267) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  268) 	return vhost_user_send(vu_dev, need_response, &msg, NULL, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  269) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  270) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  271) static int vhost_user_send_no_payload_fd(struct virtio_uml_device *vu_dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  272) 					 u32 request, int fd)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  273) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  274) 	struct vhost_user_msg msg = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  275) 		.header.request = request,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  276) 	};
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  277) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  278) 	return vhost_user_send(vu_dev, false, &msg, &fd, 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  279) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  280) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  281) static int vhost_user_send_u64(struct virtio_uml_device *vu_dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  282) 			       u32 request, u64 value)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  283) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  284) 	struct vhost_user_msg msg = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  285) 		.header.request = request,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  286) 		.header.size = sizeof(msg.payload.integer),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  287) 		.payload.integer = value,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  288) 	};
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  289) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  290) 	return vhost_user_send(vu_dev, false, &msg, NULL, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  291) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  292) 
/* Claim ownership of the device on this vhost-user connection. */
static int vhost_user_set_owner(struct virtio_uml_device *vu_dev)
{
	return vhost_user_send_no_payload(vu_dev, false, VHOST_USER_SET_OWNER);
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  297) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  298) static int vhost_user_get_features(struct virtio_uml_device *vu_dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  299) 				   u64 *features)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  300) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  301) 	int rc = vhost_user_send_no_payload(vu_dev, true,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  302) 					    VHOST_USER_GET_FEATURES);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  303) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  304) 	if (rc)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  305) 		return rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  306) 	return vhost_user_recv_u64(vu_dev, features);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  307) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  308) 
/* Tell the back-end which device features we accepted. */
static int vhost_user_set_features(struct virtio_uml_device *vu_dev,
				   u64 features)
{
	return vhost_user_send_u64(vu_dev, VHOST_USER_SET_FEATURES, features);
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  314) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  315) static int vhost_user_get_protocol_features(struct virtio_uml_device *vu_dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  316) 					    u64 *protocol_features)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  317) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  318) 	int rc = vhost_user_send_no_payload(vu_dev, true,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  319) 			VHOST_USER_GET_PROTOCOL_FEATURES);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  320) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  321) 	if (rc)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  322) 		return rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  323) 	return vhost_user_recv_u64(vu_dev, protocol_features);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  324) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  325) 
/* Tell the back-end which protocol features we will use. */
static int vhost_user_set_protocol_features(struct virtio_uml_device *vu_dev,
					    u64 protocol_features)
{
	return vhost_user_send_u64(vu_dev, VHOST_USER_SET_PROTOCOL_FEATURES,
				   protocol_features);
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  332) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  333) static void vhost_user_reply(struct virtio_uml_device *vu_dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  334) 			     struct vhost_user_msg *msg, int response)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  335) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  336) 	struct vhost_user_msg reply = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  337) 		.payload.integer = response,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  338) 	};
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  339) 	size_t size = sizeof(reply.header) + sizeof(reply.payload.integer);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  340) 	int rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  341) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  342) 	reply.header = msg->header;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  343) 	reply.header.flags &= ~VHOST_USER_FLAG_NEED_REPLY;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  344) 	reply.header.flags |= VHOST_USER_FLAG_REPLY;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  345) 	reply.header.size = sizeof(reply.payload.integer);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  346) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  347) 	rc = full_sendmsg_fds(vu_dev->req_fd, &reply, size, NULL, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  348) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  349) 	if (rc)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  350) 		vu_err(vu_dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  351) 		       "sending reply to slave request failed: %d (size %zu)\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  352) 		       rc, size);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  353) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  354) 
/*
 * Interrupt handler for the slave request fd: read one slave request,
 * dispatch it, and — if the slave asked for one — send back a reply
 * (0 = handled, non-zero = error/unsupported).
 */
static irqreturn_t vu_req_interrupt(int irq, void *data)
{
	struct virtio_uml_device *vu_dev = data;
	struct virtqueue *vq;
	int response = 1;	/* default: report failure/unsupported */
	struct {
		struct vhost_user_msg msg;
		u8 extra_payload[512];	/* headroom beyond the union payload */
	} msg;
	int rc;

	rc = vhost_user_recv_req(vu_dev, &msg.msg,
				 sizeof(msg.msg.payload) +
				 sizeof(msg.extra_payload));

	if (rc)
		return IRQ_NONE;

	switch (msg.msg.header.request) {
	case VHOST_USER_SLAVE_CONFIG_CHANGE_MSG:
		virtio_config_changed(&vu_dev->vdev);
		response = 0;
		break;
	case VHOST_USER_SLAVE_VRING_CALL:
		/* Find the virtqueue the slave is signalling and ring it. */
		virtio_device_for_each_vq((&vu_dev->vdev), vq) {
			if (vq->index == msg.msg.payload.vring_state.index) {
				response = 0;
				vring_interrupt(0 /* ignored */, vq);
				break;
			}
		}
		break;
	case VHOST_USER_SLAVE_IOTLB_MSG:
		/* not supported - VIRTIO_F_ACCESS_PLATFORM */
	case VHOST_USER_SLAVE_VRING_HOST_NOTIFIER_MSG:
		/* not supported - VHOST_USER_PROTOCOL_F_HOST_NOTIFIER */
	default:
		/* intentional fallthrough: all of these are unsupported */
		vu_err(vu_dev, "unexpected slave request %d\n",
		       msg.msg.header.request);
	}

	if (msg.msg.header.flags & VHOST_USER_FLAG_NEED_REPLY)
		vhost_user_reply(vu_dev, &msg.msg, response);

	return IRQ_HANDLED;
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  401) 
/*
 * Set up the slave request channel: create a pipe, keep the read end
 * (with an IRQ attached) and hand the write end to the back-end via
 * VHOST_USER_SET_SLAVE_REQ_FD.
 *
 * Note the goto flow: "out" is reached on success too, because the
 * write end req_fds[1] must be closed locally in every case once it has
 * been (or failed to be) passed to the back-end.
 */
static int vhost_user_init_slave_req(struct virtio_uml_device *vu_dev)
{
	int rc, req_fds[2];

	/* Use a pipe for slave req fd, SIGIO is not supported for eventfd */
	rc = os_pipe(req_fds, true, true);
	if (rc < 0)
		return rc;
	vu_dev->req_fd = req_fds[0];

	rc = um_request_irq(VIRTIO_IRQ, vu_dev->req_fd, IRQ_READ,
			    vu_req_interrupt, IRQF_SHARED,
			    vu_dev->pdev->name, vu_dev);
	if (rc)
		goto err_close;

	rc = vhost_user_send_no_payload_fd(vu_dev, VHOST_USER_SET_SLAVE_REQ_FD,
					   req_fds[1]);
	if (rc)
		goto err_free_irq;

	goto out;

err_free_irq:
	um_free_irq(VIRTIO_IRQ, vu_dev);
err_close:
	os_close_file(req_fds[0]);
out:
	/* Close unused write end of request fds */
	os_close_file(req_fds[1]);
	return rc;
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  434) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  435) static int vhost_user_init(struct virtio_uml_device *vu_dev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  436) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  437) 	int rc = vhost_user_set_owner(vu_dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  438) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  439) 	if (rc)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  440) 		return rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  441) 	rc = vhost_user_get_features(vu_dev, &vu_dev->features);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  442) 	if (rc)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  443) 		return rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  444) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  445) 	if (vu_dev->features & BIT_ULL(VHOST_USER_F_PROTOCOL_FEATURES)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  446) 		rc = vhost_user_get_protocol_features(vu_dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  447) 				&vu_dev->protocol_features);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  448) 		if (rc)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  449) 			return rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  450) 		vu_dev->protocol_features &= VHOST_USER_SUPPORTED_PROTOCOL_F;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  451) 		rc = vhost_user_set_protocol_features(vu_dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  452) 				vu_dev->protocol_features);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  453) 		if (rc)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  454) 			return rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  455) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  456) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  457) 	if (vu_dev->protocol_features &
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  458) 			BIT_ULL(VHOST_USER_PROTOCOL_F_SLAVE_REQ)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  459) 		rc = vhost_user_init_slave_req(vu_dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  460) 		if (rc)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  461) 			return rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  462) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  463) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  464) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  465) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  466) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  467) static void vhost_user_get_config(struct virtio_uml_device *vu_dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  468) 				  u32 offset, void *buf, u32 len)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  469) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  470) 	u32 cfg_size = offset + len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  471) 	struct vhost_user_msg *msg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  472) 	size_t payload_size = sizeof(msg->payload.config) + cfg_size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  473) 	size_t msg_size = sizeof(msg->header) + payload_size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  474) 	int rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  475) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  476) 	if (!(vu_dev->protocol_features &
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  477) 	      BIT_ULL(VHOST_USER_PROTOCOL_F_CONFIG)))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  478) 		return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  479) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  480) 	msg = kzalloc(msg_size, GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  481) 	if (!msg)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  482) 		return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  483) 	msg->header.request = VHOST_USER_GET_CONFIG;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  484) 	msg->header.size = payload_size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  485) 	msg->payload.config.offset = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  486) 	msg->payload.config.size = cfg_size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  487) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  488) 	rc = vhost_user_send(vu_dev, true, msg, NULL, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  489) 	if (rc) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  490) 		vu_err(vu_dev, "sending VHOST_USER_GET_CONFIG failed: %d\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  491) 		       rc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  492) 		goto free;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  493) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  494) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  495) 	rc = vhost_user_recv_resp(vu_dev, msg, msg_size);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  496) 	if (rc) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  497) 		vu_err(vu_dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  498) 		       "receiving VHOST_USER_GET_CONFIG response failed: %d\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  499) 		       rc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  500) 		goto free;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  501) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  502) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  503) 	if (msg->header.size != payload_size ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  504) 	    msg->payload.config.size != cfg_size) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  505) 		rc = -EPROTO;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  506) 		vu_err(vu_dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  507) 		       "Invalid VHOST_USER_GET_CONFIG sizes (payload %d expected %zu, config %u expected %u)\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  508) 		       msg->header.size, payload_size,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  509) 		       msg->payload.config.size, cfg_size);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  510) 		goto free;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  511) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  512) 	memcpy(buf, msg->payload.config.payload + offset, len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  513) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  514) free:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  515) 	kfree(msg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  516) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  517) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  518) static void vhost_user_set_config(struct virtio_uml_device *vu_dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  519) 				  u32 offset, const void *buf, u32 len)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  520) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  521) 	struct vhost_user_msg *msg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  522) 	size_t payload_size = sizeof(msg->payload.config) + len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  523) 	size_t msg_size = sizeof(msg->header) + payload_size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  524) 	int rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  525) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  526) 	if (!(vu_dev->protocol_features &
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  527) 	      BIT_ULL(VHOST_USER_PROTOCOL_F_CONFIG)))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  528) 		return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  529) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  530) 	msg = kzalloc(msg_size, GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  531) 	if (!msg)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  532) 		return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  533) 	msg->header.request = VHOST_USER_SET_CONFIG;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  534) 	msg->header.size = payload_size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  535) 	msg->payload.config.offset = offset;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  536) 	msg->payload.config.size = len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  537) 	memcpy(msg->payload.config.payload, buf, len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  538) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  539) 	rc = vhost_user_send(vu_dev, false, msg, NULL, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  540) 	if (rc)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  541) 		vu_err(vu_dev, "sending VHOST_USER_SET_CONFIG failed: %d\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  542) 		       rc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  543) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  544) 	kfree(msg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  545) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  546) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  547) static int vhost_user_init_mem_region(u64 addr, u64 size, int *fd_out,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  548) 				      struct vhost_user_mem_region *region_out)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  549) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  550) 	unsigned long long mem_offset;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  551) 	int rc = phys_mapping(addr, &mem_offset);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  552) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  553) 	if (WARN(rc < 0, "phys_mapping of 0x%llx returned %d\n", addr, rc))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  554) 		return -EFAULT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  555) 	*fd_out = rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  556) 	region_out->guest_addr = addr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  557) 	region_out->user_addr = addr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  558) 	region_out->size = size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  559) 	region_out->mmap_offset = mem_offset;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  560) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  561) 	/* Ensure mapping is valid for the entire region */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  562) 	rc = phys_mapping(addr + size - 1, &mem_offset);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  563) 	if (WARN(rc != *fd_out, "phys_mapping of 0x%llx failed: %d != %d\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  564) 		 addr + size - 1, rc, *fd_out))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  565) 		return -EFAULT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  566) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  567) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  568) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  569) static int vhost_user_set_mem_table(struct virtio_uml_device *vu_dev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  570) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  571) 	struct vhost_user_msg msg = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  572) 		.header.request = VHOST_USER_SET_MEM_TABLE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  573) 		.header.size = sizeof(msg.payload.mem_regions),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  574) 		.payload.mem_regions.num = 1,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  575) 	};
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  576) 	unsigned long reserved = uml_reserved - uml_physmem;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  577) 	int fds[2];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  578) 	int rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  579) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  580) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  581) 	 * This is a bit tricky, see also the comment with setup_physmem().
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  582) 	 *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  583) 	 * Essentially, setup_physmem() uses a file to mmap() our physmem,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  584) 	 * but the code and data we *already* have is omitted. To us, this
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  585) 	 * is no difference, since they both become part of our address
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  586) 	 * space and memory consumption. To somebody looking in from the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  587) 	 * outside, however, it is different because the part of our memory
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  588) 	 * consumption that's already part of the binary (code/data) is not
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  589) 	 * mapped from the file, so it's not visible to another mmap from
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  590) 	 * the file descriptor.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  591) 	 *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  592) 	 * Thus, don't advertise this space to the vhost-user slave. This
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  593) 	 * means that the slave will likely abort or similar when we give
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  594) 	 * it an address from the hidden range, since it's not marked as
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  595) 	 * a valid address, but at least that way we detect the issue and
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  596) 	 * don't just have the slave read an all-zeroes buffer from the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  597) 	 * shared memory file, or write something there that we can never
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  598) 	 * see (depending on the direction of the virtqueue traffic.)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  599) 	 *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  600) 	 * Since we usually don't want to use .text for virtio buffers,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  601) 	 * this effectively means that you cannot use
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  602) 	 *  1) global variables, which are in the .bss and not in the shm
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  603) 	 *     file-backed memory
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  604) 	 *  2) the stack in some processes, depending on where they have
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  605) 	 *     their stack (or maybe only no interrupt stack?)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  606) 	 *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  607) 	 * The stack is already not typically valid for DMA, so this isn't
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  608) 	 * much of a restriction, but global variables might be encountered.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  609) 	 *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  610) 	 * It might be possible to fix it by copying around the data that's
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  611) 	 * between bss_start and where we map the file now, but it's not
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  612) 	 * something that you typically encounter with virtio drivers, so
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  613) 	 * it didn't seem worthwhile.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  614) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  615) 	rc = vhost_user_init_mem_region(reserved, physmem_size - reserved,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  616) 					&fds[0],
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  617) 					&msg.payload.mem_regions.regions[0]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  618) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  619) 	if (rc < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  620) 		return rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  621) 	if (highmem) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  622) 		msg.payload.mem_regions.num++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  623) 		rc = vhost_user_init_mem_region(__pa(end_iomem), highmem,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  624) 				&fds[1], &msg.payload.mem_regions.regions[1]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  625) 		if (rc < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  626) 			return rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  627) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  628) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  629) 	return vhost_user_send(vu_dev, false, &msg, fds,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  630) 			       msg.payload.mem_regions.num);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  631) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  632) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  633) static int vhost_user_set_vring_state(struct virtio_uml_device *vu_dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  634) 				      u32 request, u32 index, u32 num)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  635) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  636) 	struct vhost_user_msg msg = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  637) 		.header.request = request,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  638) 		.header.size = sizeof(msg.payload.vring_state),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  639) 		.payload.vring_state.index = index,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  640) 		.payload.vring_state.num = num,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  641) 	};
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  642) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  643) 	return vhost_user_send(vu_dev, false, &msg, NULL, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  644) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  645) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  646) static int vhost_user_set_vring_num(struct virtio_uml_device *vu_dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  647) 				    u32 index, u32 num)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  648) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  649) 	return vhost_user_set_vring_state(vu_dev, VHOST_USER_SET_VRING_NUM,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  650) 					  index, num);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  651) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  652) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  653) static int vhost_user_set_vring_base(struct virtio_uml_device *vu_dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  654) 				     u32 index, u32 offset)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  655) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  656) 	return vhost_user_set_vring_state(vu_dev, VHOST_USER_SET_VRING_BASE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  657) 					  index, offset);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  658) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  659) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  660) static int vhost_user_set_vring_addr(struct virtio_uml_device *vu_dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  661) 				     u32 index, u64 desc, u64 used, u64 avail,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  662) 				     u64 log)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  663) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  664) 	struct vhost_user_msg msg = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  665) 		.header.request = VHOST_USER_SET_VRING_ADDR,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  666) 		.header.size = sizeof(msg.payload.vring_addr),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  667) 		.payload.vring_addr.index = index,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  668) 		.payload.vring_addr.desc = desc,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  669) 		.payload.vring_addr.used = used,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  670) 		.payload.vring_addr.avail = avail,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  671) 		.payload.vring_addr.log = log,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  672) 	};
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  673) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  674) 	return vhost_user_send(vu_dev, false, &msg, NULL, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  675) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  676) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  677) static int vhost_user_set_vring_fd(struct virtio_uml_device *vu_dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  678) 				   u32 request, int index, int fd)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  679) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  680) 	struct vhost_user_msg msg = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  681) 		.header.request = request,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  682) 		.header.size = sizeof(msg.payload.integer),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  683) 		.payload.integer = index,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  684) 	};
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  685) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  686) 	if (index & ~VHOST_USER_VRING_INDEX_MASK)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  687) 		return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  688) 	if (fd < 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  689) 		msg.payload.integer |= VHOST_USER_VRING_POLL_MASK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  690) 		return vhost_user_send(vu_dev, false, &msg, NULL, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  691) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  692) 	return vhost_user_send(vu_dev, false, &msg, &fd, 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  693) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  694) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  695) static int vhost_user_set_vring_call(struct virtio_uml_device *vu_dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  696) 				     int index, int fd)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  697) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  698) 	return vhost_user_set_vring_fd(vu_dev, VHOST_USER_SET_VRING_CALL,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  699) 				       index, fd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  700) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  701) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  702) static int vhost_user_set_vring_kick(struct virtio_uml_device *vu_dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  703) 				     int index, int fd)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  704) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  705) 	return vhost_user_set_vring_fd(vu_dev, VHOST_USER_SET_VRING_KICK,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  706) 				       index, fd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  707) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  708) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  709) static int vhost_user_set_vring_enable(struct virtio_uml_device *vu_dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  710) 				       u32 index, bool enable)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  711) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  712) 	if (!(vu_dev->features & BIT_ULL(VHOST_USER_F_PROTOCOL_FEATURES)))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  713) 		return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  714) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  715) 	return vhost_user_set_vring_state(vu_dev, VHOST_USER_SET_VRING_ENABLE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  716) 					  index, enable);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  717) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  718) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  719) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  720) /* Virtio interface */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  721) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  722) static bool vu_notify(struct virtqueue *vq)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  723) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  724) 	struct virtio_uml_vq_info *info = vq->priv;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  725) 	const uint64_t n = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  726) 	int rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  727) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  728) 	time_travel_propagate_time();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  729) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  730) 	if (info->kick_fd < 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  731) 		struct virtio_uml_device *vu_dev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  732) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  733) 		vu_dev = to_virtio_uml_device(vq->vdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  734) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  735) 		return vhost_user_set_vring_state(vu_dev, VHOST_USER_VRING_KICK,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  736) 						  vq->index, 0) == 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  737) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  738) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  739) 	do {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  740) 		rc = os_write_file(info->kick_fd, &n, sizeof(n));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  741) 	} while (rc == -EINTR);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  742) 	return !WARN(rc != sizeof(n), "write returned %d\n", rc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  743) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  744) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  745) static irqreturn_t vu_interrupt(int irq, void *opaque)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  746) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  747) 	struct virtqueue *vq = opaque;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  748) 	struct virtio_uml_vq_info *info = vq->priv;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  749) 	uint64_t n;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  750) 	int rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  751) 	irqreturn_t ret = IRQ_NONE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  752) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  753) 	do {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  754) 		rc = os_read_file(info->call_fd, &n, sizeof(n));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  755) 		if (rc == sizeof(n))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  756) 			ret |= vring_interrupt(irq, vq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  757) 	} while (rc == sizeof(n) || rc == -EINTR);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  758) 	WARN(rc != -EAGAIN, "read returned %d\n", rc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  759) 	return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  760) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  761) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  762) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  763) static void vu_get(struct virtio_device *vdev, unsigned offset,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  764) 		   void *buf, unsigned len)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  765) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  766) 	struct virtio_uml_device *vu_dev = to_virtio_uml_device(vdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  767) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  768) 	vhost_user_get_config(vu_dev, offset, buf, len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  769) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  770) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  771) static void vu_set(struct virtio_device *vdev, unsigned offset,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  772) 		   const void *buf, unsigned len)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  773) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  774) 	struct virtio_uml_device *vu_dev = to_virtio_uml_device(vdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  775) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  776) 	vhost_user_set_config(vu_dev, offset, buf, len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  777) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  778) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  779) static u8 vu_get_status(struct virtio_device *vdev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  780) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  781) 	struct virtio_uml_device *vu_dev = to_virtio_uml_device(vdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  782) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  783) 	return vu_dev->status;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  784) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  785) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  786) static void vu_set_status(struct virtio_device *vdev, u8 status)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  787) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  788) 	struct virtio_uml_device *vu_dev = to_virtio_uml_device(vdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  789) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  790) 	vu_dev->status = status;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  791) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  792) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  793) static void vu_reset(struct virtio_device *vdev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  794) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  795) 	struct virtio_uml_device *vu_dev = to_virtio_uml_device(vdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  796) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  797) 	vu_dev->status = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  798) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  799) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  800) static void vu_del_vq(struct virtqueue *vq)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  801) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  802) 	struct virtio_uml_vq_info *info = vq->priv;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  803) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  804) 	if (info->call_fd >= 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  805) 		um_free_irq(VIRTIO_IRQ, vq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  806) 		os_close_file(info->call_fd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  807) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  808) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  809) 	if (info->kick_fd >= 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  810) 		os_close_file(info->kick_fd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  811) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  812) 	vring_del_virtqueue(vq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  813) 	kfree(info);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  814) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  815) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  816) static void vu_del_vqs(struct virtio_device *vdev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  817) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  818) 	struct virtio_uml_device *vu_dev = to_virtio_uml_device(vdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  819) 	struct virtqueue *vq, *n;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  820) 	u64 features;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  821) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  822) 	/* Note: reverse order as a workaround to a decoding bug in snabb */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  823) 	list_for_each_entry_reverse(vq, &vdev->vqs, list)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  824) 		WARN_ON(vhost_user_set_vring_enable(vu_dev, vq->index, false));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  825) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  826) 	/* Ensure previous messages have been processed */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  827) 	WARN_ON(vhost_user_get_features(vu_dev, &features));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  828) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  829) 	list_for_each_entry_safe(vq, n, &vdev->vqs, list)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  830) 		vu_del_vq(vq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  831) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  832) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  833) static int vu_setup_vq_call_fd(struct virtio_uml_device *vu_dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  834) 			       struct virtqueue *vq)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  835) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  836) 	struct virtio_uml_vq_info *info = vq->priv;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  837) 	int call_fds[2];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  838) 	int rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  839) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  840) 	/* no call FD needed/desired in this case */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  841) 	if (vu_dev->protocol_features &
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  842) 			BIT_ULL(VHOST_USER_PROTOCOL_F_INBAND_NOTIFICATIONS) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  843) 	    vu_dev->protocol_features &
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  844) 			BIT_ULL(VHOST_USER_PROTOCOL_F_SLAVE_REQ)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  845) 		info->call_fd = -1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  846) 		return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  847) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  848) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  849) 	/* Use a pipe for call fd, since SIGIO is not supported for eventfd */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  850) 	rc = os_pipe(call_fds, true, true);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  851) 	if (rc < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  852) 		return rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  853) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  854) 	info->call_fd = call_fds[0];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  855) 	rc = um_request_irq(VIRTIO_IRQ, info->call_fd, IRQ_READ,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  856) 			    vu_interrupt, IRQF_SHARED, info->name, vq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  857) 	if (rc)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  858) 		goto close_both;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  859) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  860) 	rc = vhost_user_set_vring_call(vu_dev, vq->index, call_fds[1]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  861) 	if (rc)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  862) 		goto release_irq;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  863) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  864) 	goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  865) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  866) release_irq:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  867) 	um_free_irq(VIRTIO_IRQ, vq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  868) close_both:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  869) 	os_close_file(call_fds[0]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  870) out:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  871) 	/* Close (unused) write end of call fds */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  872) 	os_close_file(call_fds[1]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  873) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  874) 	return rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  875) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  876) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  877) #ifdef CONFIG_UML_TIME_TRAVEL_SUPPORT
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  878) static void vu_defer_irq_handle(struct time_travel_event *d)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  879) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  880) 	struct virtio_uml_vq_info *info;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  881) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  882) 	info = container_of(d, struct virtio_uml_vq_info, defer);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  883) 	info->callback(info->vq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  884) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  885) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  886) static void vu_defer_irq_callback(struct virtqueue *vq)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  887) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  888) 	struct virtio_uml_vq_info *info = vq->priv;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  889) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  890) 	time_travel_add_irq_event(&info->defer);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  891) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  892) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  893) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  894) static struct virtqueue *vu_setup_vq(struct virtio_device *vdev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  895) 				     unsigned index, vq_callback_t *callback,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  896) 				     const char *name, bool ctx)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  897) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  898) 	struct virtio_uml_device *vu_dev = to_virtio_uml_device(vdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  899) 	struct platform_device *pdev = vu_dev->pdev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  900) 	struct virtio_uml_vq_info *info;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  901) 	struct virtqueue *vq;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  902) 	int num = MAX_SUPPORTED_QUEUE_SIZE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  903) 	int rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  904) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  905) 	info = kzalloc(sizeof(*info), GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  906) 	if (!info) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  907) 		rc = -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  908) 		goto error_kzalloc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  909) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  910) 	snprintf(info->name, sizeof(info->name), "%s.%d-%s", pdev->name,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  911) 		 pdev->id, name);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  912) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  913) #ifdef CONFIG_UML_TIME_TRAVEL_SUPPORT
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  914) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  915) 	 * When we get an interrupt, we must bounce it through the simulation
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  916) 	 * calendar (the simtime device), except for the simtime device itself
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  917) 	 * since that's part of the simulation control.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  918) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  919) 	if (time_travel_mode == TT_MODE_EXTERNAL && callback) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  920) 		info->callback = callback;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  921) 		callback = vu_defer_irq_callback;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  922) 		time_travel_set_event_fn(&info->defer, vu_defer_irq_handle);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  923) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  924) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  925) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  926) 	vq = vring_create_virtqueue(index, num, PAGE_SIZE, vdev, true, true,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  927) 				    ctx, vu_notify, callback, info->name);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  928) 	if (!vq) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  929) 		rc = -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  930) 		goto error_create;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  931) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  932) 	vq->priv = info;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  933) 	num = virtqueue_get_vring_size(vq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  934) #ifdef CONFIG_UML_TIME_TRAVEL_SUPPORT
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  935) 	info->vq = vq;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  936) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  937) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  938) 	if (vu_dev->protocol_features &
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  939) 			BIT_ULL(VHOST_USER_PROTOCOL_F_INBAND_NOTIFICATIONS)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  940) 		info->kick_fd = -1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  941) 	} else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  942) 		rc = os_eventfd(0, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  943) 		if (rc < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  944) 			goto error_kick;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  945) 		info->kick_fd = rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  946) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  947) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  948) 	rc = vu_setup_vq_call_fd(vu_dev, vq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  949) 	if (rc)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  950) 		goto error_call;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  951) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  952) 	rc = vhost_user_set_vring_num(vu_dev, index, num);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  953) 	if (rc)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  954) 		goto error_setup;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  955) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  956) 	rc = vhost_user_set_vring_base(vu_dev, index, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  957) 	if (rc)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  958) 		goto error_setup;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  959) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  960) 	rc = vhost_user_set_vring_addr(vu_dev, index,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  961) 				       virtqueue_get_desc_addr(vq),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  962) 				       virtqueue_get_used_addr(vq),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  963) 				       virtqueue_get_avail_addr(vq),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  964) 				       (u64) -1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  965) 	if (rc)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  966) 		goto error_setup;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  967) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  968) 	return vq;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  969) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  970) error_setup:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  971) 	if (info->call_fd >= 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  972) 		um_free_irq(VIRTIO_IRQ, vq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  973) 		os_close_file(info->call_fd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  974) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  975) error_call:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  976) 	if (info->kick_fd >= 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  977) 		os_close_file(info->kick_fd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  978) error_kick:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  979) 	vring_del_virtqueue(vq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  980) error_create:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  981) 	kfree(info);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  982) error_kzalloc:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  983) 	return ERR_PTR(rc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  984) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  985) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  986) static int vu_find_vqs(struct virtio_device *vdev, unsigned nvqs,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  987) 		       struct virtqueue *vqs[], vq_callback_t *callbacks[],
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  988) 		       const char * const names[], const bool *ctx,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  989) 		       struct irq_affinity *desc)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  990) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  991) 	struct virtio_uml_device *vu_dev = to_virtio_uml_device(vdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  992) 	int i, queue_idx = 0, rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  993) 	struct virtqueue *vq;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  994) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  995) 	rc = vhost_user_set_mem_table(vu_dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  996) 	if (rc)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  997) 		return rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  998) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  999) 	for (i = 0; i < nvqs; ++i) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1000) 		if (!names[i]) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1001) 			vqs[i] = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1002) 			continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1003) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1004) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1005) 		vqs[i] = vu_setup_vq(vdev, queue_idx++, callbacks[i], names[i],
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1006) 				     ctx ? ctx[i] : false);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1007) 		if (IS_ERR(vqs[i])) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1008) 			rc = PTR_ERR(vqs[i]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1009) 			goto error_setup;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1010) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1011) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1012) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1013) 	list_for_each_entry(vq, &vdev->vqs, list) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1014) 		struct virtio_uml_vq_info *info = vq->priv;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1015) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1016) 		if (info->kick_fd >= 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1017) 			rc = vhost_user_set_vring_kick(vu_dev, vq->index,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1018) 						       info->kick_fd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1019) 			if (rc)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1020) 				goto error_setup;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1021) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1022) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1023) 		rc = vhost_user_set_vring_enable(vu_dev, vq->index, true);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1024) 		if (rc)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1025) 			goto error_setup;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1026) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1027) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1028) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1029) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1030) error_setup:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1031) 	vu_del_vqs(vdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1032) 	return rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1033) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1034) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1035) static u64 vu_get_features(struct virtio_device *vdev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1036) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1037) 	struct virtio_uml_device *vu_dev = to_virtio_uml_device(vdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1038) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1039) 	return vu_dev->features;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1040) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1041) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1042) static int vu_finalize_features(struct virtio_device *vdev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1043) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1044) 	struct virtio_uml_device *vu_dev = to_virtio_uml_device(vdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1045) 	u64 supported = vdev->features & VHOST_USER_SUPPORTED_F;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1046) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1047) 	vring_transport_features(vdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1048) 	vu_dev->features = vdev->features | supported;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1049) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1050) 	return vhost_user_set_features(vu_dev, vu_dev->features);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1051) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1052) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1053) static const char *vu_bus_name(struct virtio_device *vdev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1054) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1055) 	struct virtio_uml_device *vu_dev = to_virtio_uml_device(vdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1056) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1057) 	return vu_dev->pdev->name;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1058) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1059) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1060) static const struct virtio_config_ops virtio_uml_config_ops = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1061) 	.get = vu_get,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1062) 	.set = vu_set,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1063) 	.get_status = vu_get_status,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1064) 	.set_status = vu_set_status,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1065) 	.reset = vu_reset,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1066) 	.find_vqs = vu_find_vqs,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1067) 	.del_vqs = vu_del_vqs,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1068) 	.get_features = vu_get_features,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1069) 	.finalize_features = vu_finalize_features,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1070) 	.bus_name = vu_bus_name,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1071) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1072) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1073) static void virtio_uml_release_dev(struct device *d)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1074) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1075) 	struct virtio_device *vdev =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1076) 			container_of(d, struct virtio_device, dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1077) 	struct virtio_uml_device *vu_dev = to_virtio_uml_device(vdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1078) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1079) 	time_travel_propagate_time();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1080) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1081) 	/* might not have been opened due to not negotiating the feature */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1082) 	if (vu_dev->req_fd >= 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1083) 		um_free_irq(VIRTIO_IRQ, vu_dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1084) 		os_close_file(vu_dev->req_fd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1085) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1086) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1087) 	os_close_file(vu_dev->sock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1088) 	kfree(vu_dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1089) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1090) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1091) /* Platform device */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1092) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1093) static int virtio_uml_probe(struct platform_device *pdev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1094) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1095) 	struct virtio_uml_platform_data *pdata = pdev->dev.platform_data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1096) 	struct virtio_uml_device *vu_dev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1097) 	int rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1098) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1099) 	if (!pdata)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1100) 		return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1101) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1102) 	vu_dev = kzalloc(sizeof(*vu_dev), GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1103) 	if (!vu_dev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1104) 		return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1105) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1106) 	vu_dev->vdev.dev.parent = &pdev->dev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1107) 	vu_dev->vdev.dev.release = virtio_uml_release_dev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1108) 	vu_dev->vdev.config = &virtio_uml_config_ops;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1109) 	vu_dev->vdev.id.device = pdata->virtio_device_id;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1110) 	vu_dev->vdev.id.vendor = VIRTIO_DEV_ANY_ID;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1111) 	vu_dev->pdev = pdev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1112) 	vu_dev->req_fd = -1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1113) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1114) 	time_travel_propagate_time();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1115) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1116) 	do {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1117) 		rc = os_connect_socket(pdata->socket_path);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1118) 	} while (rc == -EINTR);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1119) 	if (rc < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1120) 		goto error_free;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1121) 	vu_dev->sock = rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1122) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1123) 	spin_lock_init(&vu_dev->sock_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1124) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1125) 	rc = vhost_user_init(vu_dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1126) 	if (rc)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1127) 		goto error_init;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1128) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1129) 	platform_set_drvdata(pdev, vu_dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1130) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1131) 	rc = register_virtio_device(&vu_dev->vdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1132) 	if (rc)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1133) 		put_device(&vu_dev->vdev.dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1134) 	vu_dev->registered = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1135) 	return rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1136) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1137) error_init:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1138) 	os_close_file(vu_dev->sock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1139) error_free:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1140) 	kfree(vu_dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1141) 	return rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1142) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1143) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1144) static int virtio_uml_remove(struct platform_device *pdev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1145) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1146) 	struct virtio_uml_device *vu_dev = platform_get_drvdata(pdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1147) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1148) 	unregister_virtio_device(&vu_dev->vdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1149) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1150) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1151) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1152) /* Command line device list */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1153) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1154) static void vu_cmdline_release_dev(struct device *d)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1155) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1156) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1157) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1158) static struct device vu_cmdline_parent = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1159) 	.init_name = "virtio-uml-cmdline",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1160) 	.release = vu_cmdline_release_dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1161) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1162) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1163) static bool vu_cmdline_parent_registered;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1164) static int vu_cmdline_id;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1165) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1166) static int vu_unregister_cmdline_device(struct device *dev, void *data)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1167) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1168) 	struct platform_device *pdev = to_platform_device(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1169) 	struct virtio_uml_platform_data *pdata = pdev->dev.platform_data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1170) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1171) 	kfree(pdata->socket_path);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1172) 	platform_device_unregister(pdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1173) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1174) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1175) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1176) static void vu_conn_broken(struct work_struct *wk)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1177) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1178) 	struct virtio_uml_platform_data *pdata;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1179) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1180) 	pdata = container_of(wk, struct virtio_uml_platform_data, conn_broken_wk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1181) 	vu_unregister_cmdline_device(&pdata->pdev->dev, NULL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1182) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1183) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1184) static int vu_cmdline_set(const char *device, const struct kernel_param *kp)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1185) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1186) 	const char *ids = strchr(device, ':');
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1187) 	unsigned int virtio_device_id;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1188) 	int processed, consumed, err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1189) 	char *socket_path;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1190) 	struct virtio_uml_platform_data pdata, *ppdata;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1191) 	struct platform_device *pdev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1192) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1193) 	if (!ids || ids == device)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1194) 		return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1195) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1196) 	processed = sscanf(ids, ":%u%n:%d%n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1197) 			   &virtio_device_id, &consumed,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1198) 			   &vu_cmdline_id, &consumed);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1199) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1200) 	if (processed < 1 || ids[consumed])
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1201) 		return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1202) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1203) 	if (!vu_cmdline_parent_registered) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1204) 		err = device_register(&vu_cmdline_parent);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1205) 		if (err) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1206) 			pr_err("Failed to register parent device!\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1207) 			put_device(&vu_cmdline_parent);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1208) 			return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1209) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1210) 		vu_cmdline_parent_registered = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1211) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1212) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1213) 	socket_path = kmemdup_nul(device, ids - device, GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1214) 	if (!socket_path)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1215) 		return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1216) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1217) 	pdata.virtio_device_id = (u32) virtio_device_id;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1218) 	pdata.socket_path = socket_path;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1219) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1220) 	pr_info("Registering device virtio-uml.%d id=%d at %s\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1221) 		vu_cmdline_id, virtio_device_id, socket_path);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1222) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1223) 	pdev = platform_device_register_data(&vu_cmdline_parent, "virtio-uml",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1224) 					     vu_cmdline_id++, &pdata,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1225) 					     sizeof(pdata));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1226) 	err = PTR_ERR_OR_ZERO(pdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1227) 	if (err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1228) 		goto free;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1229) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1230) 	ppdata = pdev->dev.platform_data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1231) 	ppdata->pdev = pdev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1232) 	INIT_WORK(&ppdata->conn_broken_wk, vu_conn_broken);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1233) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1234) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1235) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1236) free:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1237) 	kfree(socket_path);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1238) 	return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1239) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1240) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1241) static int vu_cmdline_get_device(struct device *dev, void *data)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1242) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1243) 	struct platform_device *pdev = to_platform_device(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1244) 	struct virtio_uml_platform_data *pdata = pdev->dev.platform_data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1245) 	char *buffer = data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1246) 	unsigned int len = strlen(buffer);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1247) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1248) 	snprintf(buffer + len, PAGE_SIZE - len, "%s:%d:%d\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1249) 		 pdata->socket_path, pdata->virtio_device_id, pdev->id);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1250) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1251) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1252) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1253) static int vu_cmdline_get(char *buffer, const struct kernel_param *kp)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1254) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1255) 	buffer[0] = '\0';
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1256) 	if (vu_cmdline_parent_registered)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1257) 		device_for_each_child(&vu_cmdline_parent, buffer,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1258) 				      vu_cmdline_get_device);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1259) 	return strlen(buffer) + 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1260) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1261) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1262) static const struct kernel_param_ops vu_cmdline_param_ops = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1263) 	.set = vu_cmdline_set,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1264) 	.get = vu_cmdline_get,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1265) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1266) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1267) device_param_cb(device, &vu_cmdline_param_ops, NULL, S_IRUSR);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1268) __uml_help(vu_cmdline_param_ops,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1269) "virtio_uml.device=<socket>:<virtio_id>[:<platform_id>]\n"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1270) "    Configure a virtio device over a vhost-user socket.\n"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1271) "    See virtio_ids.h for a list of possible virtio device id values.\n"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1272) "    Optionally use a specific platform_device id.\n\n"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1273) );
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1274) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1275) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1276) static void vu_unregister_cmdline_devices(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1277) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1278) 	if (vu_cmdline_parent_registered) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1279) 		device_for_each_child(&vu_cmdline_parent, NULL,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1280) 				      vu_unregister_cmdline_device);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1281) 		device_unregister(&vu_cmdline_parent);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1282) 		vu_cmdline_parent_registered = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1283) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1284) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1285) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1286) /* Platform driver */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1287) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1288) static const struct of_device_id virtio_uml_match[] = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1289) 	{ .compatible = "virtio,uml", },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1290) 	{ }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1291) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1292) MODULE_DEVICE_TABLE(of, virtio_uml_match);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1293) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1294) static struct platform_driver virtio_uml_driver = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1295) 	.probe = virtio_uml_probe,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1296) 	.remove = virtio_uml_remove,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1297) 	.driver = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1298) 		.name = "virtio-uml",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1299) 		.of_match_table = virtio_uml_match,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1300) 	},
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1301) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1302) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1303) static int __init virtio_uml_init(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1304) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1305) 	return platform_driver_register(&virtio_uml_driver);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1306) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1307) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1308) static void __exit virtio_uml_exit(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1309) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1310) 	platform_driver_unregister(&virtio_uml_driver);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1311) 	vu_unregister_cmdline_devices();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1312) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1313) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1314) module_init(virtio_uml_init);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1315) module_exit(virtio_uml_exit);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1316) __uml_exitcall(virtio_uml_exit);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1317) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1318) MODULE_DESCRIPTION("UML driver for vhost-user virtio devices");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1319) MODULE_LICENSE("GPL");