Orange Pi5 kernel

Deprecated Linux kernel 5.10.110 for OrangePi 5/5B/5+ boards

3 Commits   0 Branches   0 Tags
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   1) /******************************************************************************
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   2)  * evtchn.c
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   3)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   4)  * Driver for receiving and demuxing event-channel signals.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   5)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   6)  * Copyright (c) 2004-2005, K A Fraser
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   7)  * Multi-process extensions Copyright (c) 2004, Steven Smith
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   8)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   9)  * This program is free software; you can redistribute it and/or
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  10)  * modify it under the terms of the GNU General Public License version 2
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  11)  * as published by the Free Software Foundation; or, when distributed
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  12)  * separately from the Linux kernel or incorporated into other
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  13)  * software packages, subject to the following license:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  14)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  15)  * Permission is hereby granted, free of charge, to any person obtaining a copy
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  16)  * of this source file (the "Software"), to deal in the Software without
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  17)  * restriction, including without limitation the rights to use, copy, modify,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  18)  * merge, publish, distribute, sublicense, and/or sell copies of the Software,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  19)  * and to permit persons to whom the Software is furnished to do so, subject to
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  20)  * the following conditions:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  21)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  22)  * The above copyright notice and this permission notice shall be included in
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  23)  * all copies or substantial portions of the Software.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  24)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  25)  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  26)  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  27)  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  28)  * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  29)  * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  30)  * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  31)  * IN THE SOFTWARE.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  32)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  33) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  34) #define pr_fmt(fmt) "xen:" KBUILD_MODNAME ": " fmt
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  35) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  36) #include <linux/module.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  37) #include <linux/kernel.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  38) #include <linux/sched.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  39) #include <linux/slab.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  40) #include <linux/string.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  41) #include <linux/errno.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  42) #include <linux/fs.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  43) #include <linux/miscdevice.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  44) #include <linux/major.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  45) #include <linux/proc_fs.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  46) #include <linux/stat.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  47) #include <linux/poll.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  48) #include <linux/irq.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  49) #include <linux/init.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  50) #include <linux/mutex.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  51) #include <linux/cpu.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  52) #include <linux/mm.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  53) #include <linux/vmalloc.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  54) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  55) #include <xen/xen.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  56) #include <xen/events.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  57) #include <xen/evtchn.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  58) #include <xen/xen-ops.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  59) #include <asm/xen/hypervisor.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  60) 
/*
 * Per-open-file state for /dev/xen/evtchn.  Each opener gets its own set
 * of bound event channels and its own notification ring.
 */
struct per_user_data {
	struct mutex bind_mutex; /* serialize bind/unbind operations */
	struct rb_root evtchns;  /* tree of struct user_evtchn, keyed by port */
	unsigned int nr_evtchns; /* number of nodes in @evtchns */

	/* Notification ring, accessed via /dev/xen/evtchn. */
	unsigned int ring_size;  /* always 0 or a power of two (64, 128, ...) */
	evtchn_port_t *ring;
	/* Free-running indices; difference gives ring occupancy. */
	unsigned int ring_cons, ring_prod, ring_overflow;
	struct mutex ring_cons_mutex; /* protect against concurrent readers */
	spinlock_t ring_prod_lock; /* protect against concurrent interrupts */

	/* Processes wait on this queue when ring is empty. */
	wait_queue_head_t evtchn_wait;
	struct fasync_struct *evtchn_async_queue;
	const char *name;

	/* Domain this opener is restricted to, or UNRESTRICTED_DOMID. */
	domid_t restrict_domid;
};
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  80) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  81) #define UNRESTRICTED_DOMID ((domid_t)-1)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  82) 
/* One bound event channel, linked into per_user_data.evtchns by port. */
struct user_evtchn {
	struct rb_node node;        /* link in per_user_data.evtchns */
	struct per_user_data *user; /* back-pointer to the owning opener */
	evtchn_port_t port;         /* tree key: the event-channel port */
	/* false after an interrupt fires, until userspace writes the port
	 * back via evtchn_write() to re-enable delivery. */
	bool enabled;
};
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  89) 
/*
 * Free a notification ring.  kvfree() pairs with the kvmalloc_array()
 * allocation in evtchn_resize_ring() and accepts NULL.
 */
static void evtchn_free_ring(evtchn_port_t *ring)
{
	kvfree(ring);
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  94) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  95) static unsigned int evtchn_ring_offset(struct per_user_data *u,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  96) 				       unsigned int idx)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  97) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  98) 	return idx & (u->ring_size - 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  99) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 100) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 101) static evtchn_port_t *evtchn_ring_entry(struct per_user_data *u,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 102) 					unsigned int idx)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 103) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 104) 	return u->ring + evtchn_ring_offset(u, idx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 105) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 106) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 107) static int add_evtchn(struct per_user_data *u, struct user_evtchn *evtchn)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 108) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 109) 	struct rb_node **new = &(u->evtchns.rb_node), *parent = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 110) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 111) 	u->nr_evtchns++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 112) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 113) 	while (*new) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 114) 		struct user_evtchn *this;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 115) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 116) 		this = rb_entry(*new, struct user_evtchn, node);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 117) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 118) 		parent = *new;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 119) 		if (this->port < evtchn->port)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 120) 			new = &((*new)->rb_left);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 121) 		else if (this->port > evtchn->port)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 122) 			new = &((*new)->rb_right);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 123) 		else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 124) 			return -EEXIST;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 125) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 126) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 127) 	/* Add new node and rebalance tree. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 128) 	rb_link_node(&evtchn->node, parent, new);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 129) 	rb_insert_color(&evtchn->node, &u->evtchns);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 130) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 131) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 132) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 133) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 134) static void del_evtchn(struct per_user_data *u, struct user_evtchn *evtchn)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 135) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 136) 	u->nr_evtchns--;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 137) 	rb_erase(&evtchn->node, &u->evtchns);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 138) 	kfree(evtchn);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 139) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 140) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 141) static struct user_evtchn *find_evtchn(struct per_user_data *u,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 142) 				       evtchn_port_t port)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 143) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 144) 	struct rb_node *node = u->evtchns.rb_node;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 145) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 146) 	while (node) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 147) 		struct user_evtchn *evtchn;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 148) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 149) 		evtchn = rb_entry(node, struct user_evtchn, node);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 150) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 151) 		if (evtchn->port < port)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 152) 			node = node->rb_left;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 153) 		else if (evtchn->port > port)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 154) 			node = node->rb_right;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 155) 		else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 156) 			return evtchn;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 157) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 158) 	return NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 159) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 160) 
/*
 * Interrupt handler for a bound event channel: queue the port on the
 * per-user notification ring and wake any waiting reader.  Delivery on
 * this port stays disabled until userspace writes the port back via
 * evtchn_write().
 */
static irqreturn_t evtchn_interrupt(int irq, void *data)
{
	struct user_evtchn *evtchn = data;
	struct per_user_data *u = evtchn->user;

	/* A second interrupt before userspace re-enabled is unexpected. */
	WARN(!evtchn->enabled,
	     "Interrupt for port %u, but apparently not enabled; per-user %p\n",
	     evtchn->port, u);

	evtchn->enabled = false;

	spin_lock(&u->ring_prod_lock);

	/* Room in the ring? (cons/prod are free-running indices.) */
	if ((u->ring_prod - u->ring_cons) < u->ring_size) {
		*evtchn_ring_entry(u, u->ring_prod) = evtchn->port;
		wmb(); /* Ensure ring contents visible */
		/* Wake readers only on the empty -> non-empty transition. */
		if (u->ring_cons == u->ring_prod++) {
			wake_up_interruptible(&u->evtchn_wait);
			kill_fasync(&u->evtchn_async_queue,
				    SIGIO, POLL_IN);
		}
	} else
		u->ring_overflow = 1;	/* reported as -EFBIG by read() */

	spin_unlock(&u->ring_prod_lock);

	return IRQ_HANDLED;
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 189) 
/*
 * read() on /dev/xen/evtchn: return an array of evtchn_port_t values for
 * the ports that have signalled since they were last read.  Blocks until
 * at least one port is pending unless O_NONBLOCK is set.  Returns -EFBIG
 * once the ring has overflowed.
 */
static ssize_t evtchn_read(struct file *file, char __user *buf,
			   size_t count, loff_t *ppos)
{
	int rc;
	unsigned int c, p, bytes1 = 0, bytes2 = 0;
	struct per_user_data *u = file->private_data;

	/* Whole number of ports. */
	count &= ~(sizeof(evtchn_port_t)-1);

	if (count == 0)
		return 0;

	if (count > PAGE_SIZE)
		count = PAGE_SIZE;

	/* Loop exits (via break) holding ring_cons_mutex with c != p. */
	for (;;) {
		mutex_lock(&u->ring_cons_mutex);

		rc = -EFBIG;
		if (u->ring_overflow)
			goto unlock_out;

		c = u->ring_cons;
		p = u->ring_prod;
		if (c != p)
			break;

		mutex_unlock(&u->ring_cons_mutex);

		if (file->f_flags & O_NONBLOCK)
			return -EAGAIN;

		rc = wait_event_interruptible(u->evtchn_wait,
					      u->ring_cons != u->ring_prod);
		if (rc)
			return rc;
	}

	/* Byte lengths of two chunks. Chunk split (if any) is at ring wrap. */
	if (((c ^ p) & u->ring_size) != 0) {
		/* cons and prod are in different laps: copy up to the
		 * end of the ring, then from the start of the ring. */
		bytes1 = (u->ring_size - evtchn_ring_offset(u, c)) *
			sizeof(evtchn_port_t);
		bytes2 = evtchn_ring_offset(u, p) * sizeof(evtchn_port_t);
	} else {
		bytes1 = (p - c) * sizeof(evtchn_port_t);
		bytes2 = 0;
	}

	/* Truncate chunks according to caller's maximum byte count. */
	if (bytes1 > count) {
		bytes1 = count;
		bytes2 = 0;
	} else if ((bytes1 + bytes2) > count) {
		bytes2 = count - bytes1;
	}

	rc = -EFAULT;
	rmb(); /* Ensure that we see the port before we copy it. */
	if (copy_to_user(buf, evtchn_ring_entry(u, c), bytes1) ||
	    ((bytes2 != 0) &&
	     copy_to_user(&buf[bytes1], &u->ring[0], bytes2)))
		goto unlock_out;

	/* Consume only what was actually copied out. */
	u->ring_cons += (bytes1 + bytes2) / sizeof(evtchn_port_t);
	rc = bytes1 + bytes2;

 unlock_out:
	mutex_unlock(&u->ring_cons_mutex);
	return rc;
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 261) 
/*
 * write() on /dev/xen/evtchn: userspace writes back an array of
 * evtchn_port_t values it has finished handling.  Each port bound to
 * this opener and currently disabled is re-enabled, and its irq gets a
 * lateeoi so further events can be delivered.  Ports that are unknown
 * or already enabled are silently skipped.
 */
static ssize_t evtchn_write(struct file *file, const char __user *buf,
			    size_t count, loff_t *ppos)
{
	int rc, i;
	evtchn_port_t *kbuf = (evtchn_port_t *)__get_free_page(GFP_KERNEL);
	struct per_user_data *u = file->private_data;

	if (kbuf == NULL)
		return -ENOMEM;

	/* Whole number of ports. */
	count &= ~(sizeof(evtchn_port_t)-1);

	rc = 0;
	if (count == 0)
		goto out;

	/* The bounce buffer is one page, so cap the request to match. */
	if (count > PAGE_SIZE)
		count = PAGE_SIZE;

	rc = -EFAULT;
	if (copy_from_user(kbuf, buf, count) != 0)
		goto out;

	/* Serialize against bind/unbind changing the tree under us. */
	mutex_lock(&u->bind_mutex);

	for (i = 0; i < (count/sizeof(evtchn_port_t)); i++) {
		evtchn_port_t port = kbuf[i];
		struct user_evtchn *evtchn;

		evtchn = find_evtchn(u, port);
		if (evtchn && !evtchn->enabled) {
			evtchn->enabled = true;
			xen_irq_lateeoi(irq_from_evtchn(port), 0);
		}
	}

	mutex_unlock(&u->bind_mutex);

	rc = count;

 out:
	free_page((unsigned long)kbuf);
	return rc;
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 307) 
/*
 * Grow the notification ring (64 slots initially, doubling thereafter)
 * so that it has at least one slot per bound event channel.  Returns 0
 * on success or if no resize is needed, -ENOMEM on allocation failure.
 */
static int evtchn_resize_ring(struct per_user_data *u)
{
	unsigned int new_size;
	evtchn_port_t *new_ring, *old_ring;

	/*
	 * Ensure the ring is large enough to capture all possible
	 * events. i.e., one free slot for each bound event.
	 */
	if (u->nr_evtchns <= u->ring_size)
		return 0;

	if (u->ring_size == 0)
		new_size = 64;
	else
		new_size = 2 * u->ring_size;

	/* Allocate before taking any lock; may sleep. */
	new_ring = kvmalloc_array(new_size, sizeof(*new_ring), GFP_KERNEL);
	if (!new_ring)
		return -ENOMEM;

	old_ring = u->ring;

	/*
	 * Access to the ring contents is serialized by either the
	 * prod /or/ cons lock so take both when resizing.
	 */
	mutex_lock(&u->ring_cons_mutex);
	spin_lock_irq(&u->ring_prod_lock);

	/*
	 * Copy the old ring contents to the new ring.
	 *
	 * To take care of wrapping, a full ring, and the new index
	 * pointing into the second half, simply copy the old contents
	 * twice.
	 *
	 * +---------+    +------------------+
	 * |34567  12| -> |34567  1234567  12|
	 * +-----p-c-+    +-------c------p---+
	 */
	memcpy(new_ring, old_ring, u->ring_size * sizeof(*u->ring));
	memcpy(new_ring + u->ring_size, old_ring,
	       u->ring_size * sizeof(*u->ring));

	u->ring = new_ring;
	u->ring_size = new_size;

	spin_unlock_irq(&u->ring_prod_lock);
	mutex_unlock(&u->ring_cons_mutex);

	/* Old ring is unreachable now that u->ring points at new_ring. */
	evtchn_free_ring(old_ring);

	return 0;
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 363) 
/*
 * Bind @port to this opener: track it in the per-user tree, make sure
 * the notification ring can hold it, and install evtchn_interrupt() as
 * its (lateeoi) irq handler.  Returns 0 on success or a negative errno;
 * on failure the port is closed and all partial state is undone.
 */
static int evtchn_bind_to_user(struct per_user_data *u, evtchn_port_t port)
{
	struct user_evtchn *evtchn;
	struct evtchn_close close;
	int rc = 0;

	/*
	 * Ports are never reused, so every caller should pass in a
	 * unique port.
	 *
	 * (Locking not necessary because we haven't registered the
	 * interrupt handler yet, and our caller has already
	 * serialized bind operations.)
	 */

	evtchn = kzalloc(sizeof(*evtchn), GFP_KERNEL);
	if (!evtchn)
		return -ENOMEM;

	evtchn->user = u;
	evtchn->port = port;
	evtchn->enabled = true; /* start enabled */

	rc = add_evtchn(u, evtchn);
	if (rc < 0)
		goto err;

	rc = evtchn_resize_ring(u);
	if (rc < 0)
		goto err;

	rc = bind_evtchn_to_irqhandler_lateeoi(port, evtchn_interrupt, 0,
					       u->name, evtchn);
	if (rc < 0)
		goto err;

	rc = evtchn_make_refcounted(port);
	return rc;

err:
	/* bind failed, should close the port now */
	close.port = port;
	if (HYPERVISOR_event_channel_op(EVTCHNOP_close, &close) != 0)
		BUG();
	/* Also undoes the nr_evtchns++ done inside add_evtchn(). */
	del_evtchn(u, evtchn);
	return rc;
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 411) 
/*
 * Tear down one binding: release the irq backing the port and remove
 * the tracking node from the per-user tree.
 */
static void evtchn_unbind_from_user(struct per_user_data *u,
				    struct user_evtchn *evtchn)
{
	int irq = irq_from_evtchn(evtchn->port);

	/* The port was bound to an irq in evtchn_bind_to_user(). */
	BUG_ON(irq < 0);

	unbind_from_irqhandler(irq, evtchn);

	del_evtchn(u, evtchn);
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 423) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 424) static DEFINE_PER_CPU(int, bind_last_selected_cpu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 425) 
/*
 * Spread interdomain event-channel interrupts across CPUs: pick the
 * next online CPU (round-robin via bind_last_selected_cpu) from the
 * irq's affinity mask and retarget the event channel to it.
 */
static void evtchn_bind_interdom_next_vcpu(evtchn_port_t evtchn)
{
	unsigned int selected_cpu, irq;
	struct irq_desc *desc;
	unsigned long flags;

	irq = irq_from_evtchn(evtchn);
	desc = irq_to_desc(irq);

	if (!desc)
		return;

	raw_spin_lock_irqsave(&desc->lock, flags);
	/* Advance round-robin from wherever this CPU last left off. */
	selected_cpu = this_cpu_read(bind_last_selected_cpu);
	selected_cpu = cpumask_next_and(selected_cpu,
			desc->irq_common_data.affinity, cpu_online_mask);

	/* Wrapped past the last CPU: restart from the first eligible one. */
	if (unlikely(selected_cpu >= nr_cpu_ids))
		selected_cpu = cpumask_first_and(desc->irq_common_data.affinity,
				cpu_online_mask);

	this_cpu_write(bind_last_selected_cpu, selected_cpu);

	/* unmask expects irqs to be disabled */
	xen_set_affinity_evtchn(desc, selected_cpu);
	raw_spin_unlock_irqrestore(&desc->lock, flags);
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 453) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 454) static long evtchn_ioctl(struct file *file,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 455) 			 unsigned int cmd, unsigned long arg)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 456) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 457) 	int rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 458) 	struct per_user_data *u = file->private_data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 459) 	void __user *uarg = (void __user *) arg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 460) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 461) 	/* Prevent bind from racing with unbind */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 462) 	mutex_lock(&u->bind_mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 463) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 464) 	switch (cmd) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 465) 	case IOCTL_EVTCHN_BIND_VIRQ: {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 466) 		struct ioctl_evtchn_bind_virq bind;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 467) 		struct evtchn_bind_virq bind_virq;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 468) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 469) 		rc = -EACCES;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 470) 		if (u->restrict_domid != UNRESTRICTED_DOMID)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 471) 			break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 472) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 473) 		rc = -EFAULT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 474) 		if (copy_from_user(&bind, uarg, sizeof(bind)))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 475) 			break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 476) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 477) 		bind_virq.virq = bind.virq;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 478) 		bind_virq.vcpu = xen_vcpu_nr(0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 479) 		rc = HYPERVISOR_event_channel_op(EVTCHNOP_bind_virq,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 480) 						 &bind_virq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 481) 		if (rc != 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 482) 			break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 483) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 484) 		rc = evtchn_bind_to_user(u, bind_virq.port);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 485) 		if (rc == 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 486) 			rc = bind_virq.port;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 487) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 488) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 489) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 490) 	case IOCTL_EVTCHN_BIND_INTERDOMAIN: {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 491) 		struct ioctl_evtchn_bind_interdomain bind;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 492) 		struct evtchn_bind_interdomain bind_interdomain;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 493) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 494) 		rc = -EFAULT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 495) 		if (copy_from_user(&bind, uarg, sizeof(bind)))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 496) 			break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 497) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 498) 		rc = -EACCES;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 499) 		if (u->restrict_domid != UNRESTRICTED_DOMID &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 500) 		    u->restrict_domid != bind.remote_domain)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 501) 			break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 502) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 503) 		bind_interdomain.remote_dom  = bind.remote_domain;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 504) 		bind_interdomain.remote_port = bind.remote_port;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 505) 		rc = HYPERVISOR_event_channel_op(EVTCHNOP_bind_interdomain,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 506) 						 &bind_interdomain);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 507) 		if (rc != 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 508) 			break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 509) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 510) 		rc = evtchn_bind_to_user(u, bind_interdomain.local_port);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 511) 		if (rc == 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 512) 			rc = bind_interdomain.local_port;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 513) 			evtchn_bind_interdom_next_vcpu(rc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 514) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 515) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 516) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 517) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 518) 	case IOCTL_EVTCHN_BIND_UNBOUND_PORT: {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 519) 		struct ioctl_evtchn_bind_unbound_port bind;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 520) 		struct evtchn_alloc_unbound alloc_unbound;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 521) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 522) 		rc = -EACCES;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 523) 		if (u->restrict_domid != UNRESTRICTED_DOMID)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 524) 			break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 525) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 526) 		rc = -EFAULT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 527) 		if (copy_from_user(&bind, uarg, sizeof(bind)))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 528) 			break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 529) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 530) 		alloc_unbound.dom        = DOMID_SELF;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 531) 		alloc_unbound.remote_dom = bind.remote_domain;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 532) 		rc = HYPERVISOR_event_channel_op(EVTCHNOP_alloc_unbound,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 533) 						 &alloc_unbound);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 534) 		if (rc != 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 535) 			break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 536) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 537) 		rc = evtchn_bind_to_user(u, alloc_unbound.port);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 538) 		if (rc == 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 539) 			rc = alloc_unbound.port;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 540) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 541) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 542) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 543) 	case IOCTL_EVTCHN_UNBIND: {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 544) 		struct ioctl_evtchn_unbind unbind;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 545) 		struct user_evtchn *evtchn;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 546) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 547) 		rc = -EFAULT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 548) 		if (copy_from_user(&unbind, uarg, sizeof(unbind)))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 549) 			break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 550) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 551) 		rc = -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 552) 		if (unbind.port >= xen_evtchn_nr_channels())
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 553) 			break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 554) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 555) 		rc = -ENOTCONN;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 556) 		evtchn = find_evtchn(u, unbind.port);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 557) 		if (!evtchn)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 558) 			break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 559) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 560) 		disable_irq(irq_from_evtchn(unbind.port));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 561) 		evtchn_unbind_from_user(u, evtchn);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 562) 		rc = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 563) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 564) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 565) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 566) 	case IOCTL_EVTCHN_NOTIFY: {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 567) 		struct ioctl_evtchn_notify notify;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 568) 		struct user_evtchn *evtchn;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 569) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 570) 		rc = -EFAULT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 571) 		if (copy_from_user(&notify, uarg, sizeof(notify)))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 572) 			break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 573) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 574) 		rc = -ENOTCONN;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 575) 		evtchn = find_evtchn(u, notify.port);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 576) 		if (evtchn) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 577) 			notify_remote_via_evtchn(notify.port);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 578) 			rc = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 579) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 580) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 581) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 582) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 583) 	case IOCTL_EVTCHN_RESET: {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 584) 		/* Initialise the ring to empty. Clear errors. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 585) 		mutex_lock(&u->ring_cons_mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 586) 		spin_lock_irq(&u->ring_prod_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 587) 		u->ring_cons = u->ring_prod = u->ring_overflow = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 588) 		spin_unlock_irq(&u->ring_prod_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 589) 		mutex_unlock(&u->ring_cons_mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 590) 		rc = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 591) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 592) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 593) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 594) 	case IOCTL_EVTCHN_RESTRICT_DOMID: {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 595) 		struct ioctl_evtchn_restrict_domid ierd;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 596) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 597) 		rc = -EACCES;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 598) 		if (u->restrict_domid != UNRESTRICTED_DOMID)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 599) 			break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 600) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 601) 		rc = -EFAULT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 602) 		if (copy_from_user(&ierd, uarg, sizeof(ierd)))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 603) 		    break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 604) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 605) 		rc = -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 606) 		if (ierd.domid == 0 || ierd.domid >= DOMID_FIRST_RESERVED)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 607) 			break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 608) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 609) 		u->restrict_domid = ierd.domid;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 610) 		rc = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 611) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 612) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 613) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 614) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 615) 	default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 616) 		rc = -ENOSYS;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 617) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 618) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 619) 	mutex_unlock(&u->bind_mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 620) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 621) 	return rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 622) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 623) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 624) static __poll_t evtchn_poll(struct file *file, poll_table *wait)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 625) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 626) 	__poll_t mask = EPOLLOUT | EPOLLWRNORM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 627) 	struct per_user_data *u = file->private_data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 628) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 629) 	poll_wait(file, &u->evtchn_wait, wait);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 630) 	if (u->ring_cons != u->ring_prod)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 631) 		mask |= EPOLLIN | EPOLLRDNORM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 632) 	if (u->ring_overflow)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 633) 		mask = EPOLLERR;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 634) 	return mask;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 635) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 636) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 637) static int evtchn_fasync(int fd, struct file *filp, int on)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 638) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 639) 	struct per_user_data *u = filp->private_data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 640) 	return fasync_helper(fd, filp, on, &u->evtchn_async_queue);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 641) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 642) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 643) static int evtchn_open(struct inode *inode, struct file *filp)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 644) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 645) 	struct per_user_data *u;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 646) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 647) 	u = kzalloc(sizeof(*u), GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 648) 	if (u == NULL)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 649) 		return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 650) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 651) 	u->name = kasprintf(GFP_KERNEL, "evtchn:%s", current->comm);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 652) 	if (u->name == NULL) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 653) 		kfree(u);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 654) 		return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 655) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 656) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 657) 	init_waitqueue_head(&u->evtchn_wait);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 658) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 659) 	mutex_init(&u->bind_mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 660) 	mutex_init(&u->ring_cons_mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 661) 	spin_lock_init(&u->ring_prod_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 662) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 663) 	u->restrict_domid = UNRESTRICTED_DOMID;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 664) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 665) 	filp->private_data = u;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 666) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 667) 	return stream_open(inode, filp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 668) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 669) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 670) static int evtchn_release(struct inode *inode, struct file *filp)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 671) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 672) 	struct per_user_data *u = filp->private_data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 673) 	struct rb_node *node;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 674) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 675) 	while ((node = u->evtchns.rb_node)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 676) 		struct user_evtchn *evtchn;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 677) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 678) 		evtchn = rb_entry(node, struct user_evtchn, node);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 679) 		disable_irq(irq_from_evtchn(evtchn->port));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 680) 		evtchn_unbind_from_user(u, evtchn);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 681) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 682) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 683) 	evtchn_free_ring(u->ring);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 684) 	kfree(u->name);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 685) 	kfree(u);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 686) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 687) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 688) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 689) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 690) static const struct file_operations evtchn_fops = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 691) 	.owner   = THIS_MODULE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 692) 	.read    = evtchn_read,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 693) 	.write   = evtchn_write,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 694) 	.unlocked_ioctl = evtchn_ioctl,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 695) 	.poll    = evtchn_poll,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 696) 	.fasync  = evtchn_fasync,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 697) 	.open    = evtchn_open,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 698) 	.release = evtchn_release,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 699) 	.llseek	 = no_llseek,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 700) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 701) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 702) static struct miscdevice evtchn_miscdev = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 703) 	.minor        = MISC_DYNAMIC_MINOR,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 704) 	.name         = "xen/evtchn",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 705) 	.fops         = &evtchn_fops,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 706) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 707) static int __init evtchn_init(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 708) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 709) 	int err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 710) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 711) 	if (!xen_domain())
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 712) 		return -ENODEV;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 713) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 714) 	/* Create '/dev/xen/evtchn'. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 715) 	err = misc_register(&evtchn_miscdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 716) 	if (err != 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 717) 		pr_err("Could not register /dev/xen/evtchn\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 718) 		return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 719) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 720) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 721) 	pr_info("Event-channel device installed\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 722) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 723) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 724) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 725) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 726) static void __exit evtchn_cleanup(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 727) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 728) 	misc_deregister(&evtchn_miscdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 729) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 730) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 731) module_init(evtchn_init);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 732) module_exit(evtchn_cleanup);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 733) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 734) MODULE_LICENSE("GPL");