// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * (c) 2017 Stefano Stabellini <stefano@aporeto.com>
 */

#include <linux/inet.h>
#include <linux/kthread.h>
#include <linux/list.h>
#include <linux/radix-tree.h>
#include <linux/module.h>
#include <linux/semaphore.h>
#include <linux/wait.h>
#include <net/sock.h>
#include <net/inet_common.h>
#include <net/inet_connection_sock.h>
#include <net/request_sock.h>

#include <xen/events.h>
#include <xen/grant_table.h>
#include <xen/xen.h>
#include <xen/xenbus.h>
#include <xen/interface/io/pvcalls.h>

#define PVCALLS_VERSIONS "1"
#define MAX_RING_ORDER XENBUS_MAX_RING_GRANT_ORDER

static struct pvcalls_back_global {
	struct list_head frontends;
	struct semaphore frontends_lock;
} pvcalls_back_global;

/*
 * Per-frontend data structure. It contains pointers to the command
 * ring, its event channel, a list of active sockets and a tree of
 * passive sockets.
 */
struct pvcalls_fedata {
	struct list_head list;
	struct xenbus_device *dev;
	struct xen_pvcalls_sring *sring;
	struct xen_pvcalls_back_ring ring;
	int irq;
	struct list_head socket_mappings;
	struct radix_tree_root socketpass_mappings;
	struct semaphore socket_lock;
};

struct pvcalls_ioworker {
	struct work_struct register_work;
	struct workqueue_struct *wq;
};

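/*
 * Per active (connected) socket state: the mapped data ring and event
 * channel, plus the counters used to coordinate with the ioworker.
 */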
struct sock_mapping {
	struct list_head list;
	struct pvcalls_fedata *fedata;
	struct sockpass_mapping *sockpass;
	struct socket *sock;
	uint64_t id;
	grant_ref_t ref;
	struct pvcalls_data_intf *ring;
	void *bytes;
	struct pvcalls_data data;
	uint32_t ring_order;
	int irq;
	atomic_t read;
	atomic_t write;
	atomic_t io;
	atomic_t release;
	atomic_t eoi;
	void (*saved_data_ready)(struct sock *sk);
	struct pvcalls_ioworker ioworker;
};

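/*
 * Per passive (listening) socket state: a copy of the pending accept or
 * poll request, protected by copy_lock, and the workqueue that runs the
 * deferred accept.
 */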
struct sockpass_mapping {
	struct list_head list;
	struct pvcalls_fedata *fedata;
	struct socket *sock;
	uint64_t id;
	struct xen_pvcalls_request reqcopy;
	spinlock_t copy_lock;
	struct workqueue_struct *wq;
	struct work_struct register_work;
	void (*saved_data_ready)(struct sock *sk);
};

static irqreturn_t pvcalls_back_conn_event(int irq, void *sock_map);
static int pvcalls_back_release_active(struct xenbus_device *dev,
				       struct pvcalls_fedata *fedata,
				       struct sock_mapping *map);

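/*
 * Move data from the socket receive queue into the shared "in" ring.
 * A true return value tells the ioworker that the event did real work,
 * so the EOI must not carry the "spurious" hint.
 */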
static bool pvcalls_conn_back_read(void *opaque)
{
	struct sock_mapping *map = (struct sock_mapping *)opaque;
	struct msghdr msg;
	struct kvec vec[2];
	RING_IDX cons, prod, size, wanted, array_size, masked_prod, masked_cons;
	int32_t error;
	struct pvcalls_data_intf *intf = map->ring;
	struct pvcalls_data *data = &map->data;
	unsigned long flags;
	int ret;

	array_size = XEN_FLEX_RING_SIZE(map->ring_order);
	cons = intf->in_cons;
	prod = intf->in_prod;
	error = intf->in_error;
	/* read the indexes first, then deal with the data */
	virt_mb();

	if (error)
		return false;

	size = pvcalls_queued(prod, cons, array_size);
	if (size >= array_size)
		return false;
	spin_lock_irqsave(&map->sock->sk->sk_receive_queue.lock, flags);
	if (skb_queue_empty(&map->sock->sk->sk_receive_queue)) {
		atomic_set(&map->read, 0);
		spin_unlock_irqrestore(&map->sock->sk->sk_receive_queue.lock,
				flags);
		return true;
	}
	spin_unlock_irqrestore(&map->sock->sk->sk_receive_queue.lock, flags);
	wanted = array_size - size;
	masked_prod = pvcalls_mask(prod, array_size);
	masked_cons = pvcalls_mask(cons, array_size);

	memset(&msg, 0, sizeof(msg));
	/*
	 * The iov_iter direction names what the buffer is to the copy:
	 * for recvmsg() the ring is the data *destination*, i.e. READ
	 * (ITER_DEST on newer kernels), not WRITE.
	 */
	if (masked_prod < masked_cons) {
		vec[0].iov_base = data->in + masked_prod;
		vec[0].iov_len = wanted;
		iov_iter_kvec(&msg.msg_iter, READ, vec, 1, wanted);
	} else {
		vec[0].iov_base = data->in + masked_prod;
		vec[0].iov_len = array_size - masked_prod;
		vec[1].iov_base = data->in;
		vec[1].iov_len = wanted - vec[0].iov_len;
		iov_iter_kvec(&msg.msg_iter, READ, vec, 2, wanted);
	}

	atomic_set(&map->read, 0);
	ret = inet_recvmsg(map->sock, &msg, wanted, MSG_DONTWAIT);
	WARN_ON(ret > wanted);
	if (ret == -EAGAIN) /* shouldn't happen */
		return true;
	if (!ret)
		ret = -ENOTCONN;
	spin_lock_irqsave(&map->sock->sk->sk_receive_queue.lock, flags);
	if (ret > 0 && !skb_queue_empty(&map->sock->sk->sk_receive_queue))
		atomic_inc(&map->read);
	spin_unlock_irqrestore(&map->sock->sk->sk_receive_queue.lock, flags);

	/* write the data, then modify the indexes */
	virt_wmb();
	if (ret < 0) {
		atomic_set(&map->read, 0);
		intf->in_error = ret;
	} else
		intf->in_prod = prod + ret;
	/* update the indexes, then notify the other end */
	virt_wmb();
	notify_remote_via_irq(map->irq);

	return true;
}

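/*
 * Drain the shared "out" ring into the socket with a non-blocking
 * sendmsg(). A partial send leaves work pending, so the ioworker is
 * asked to run again.
 */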
static bool pvcalls_conn_back_write(struct sock_mapping *map)
{
	struct pvcalls_data_intf *intf = map->ring;
	struct pvcalls_data *data = &map->data;
	struct msghdr msg;
	struct kvec vec[2];
	RING_IDX cons, prod, size, array_size;
	int ret;

	cons = intf->out_cons;
	prod = intf->out_prod;
	/* read the indexes before dealing with the data */
	virt_mb();

	array_size = XEN_FLEX_RING_SIZE(map->ring_order);
	size = pvcalls_queued(prod, cons, array_size);
	if (size == 0)
		return false;

	memset(&msg, 0, sizeof(msg));
	msg.msg_flags |= MSG_DONTWAIT;
	/*
	 * For sendmsg() the ring is the data *source*, i.e. WRITE
	 * (ITER_SOURCE on newer kernels), not READ.
	 */
	if (pvcalls_mask(prod, array_size) > pvcalls_mask(cons, array_size)) {
		vec[0].iov_base = data->out + pvcalls_mask(cons, array_size);
		vec[0].iov_len = size;
		iov_iter_kvec(&msg.msg_iter, WRITE, vec, 1, size);
	} else {
		vec[0].iov_base = data->out + pvcalls_mask(cons, array_size);
		vec[0].iov_len = array_size - pvcalls_mask(cons, array_size);
		vec[1].iov_base = data->out;
		vec[1].iov_len = size - vec[0].iov_len;
		iov_iter_kvec(&msg.msg_iter, WRITE, vec, 2, size);
	}

	atomic_set(&map->write, 0);
	ret = inet_sendmsg(map->sock, &msg, size);
	if (ret == -EAGAIN) {
		atomic_inc(&map->write);
		atomic_inc(&map->io);
		return true;
	}

	/* write the data, then update the indexes */
	virt_wmb();
	if (ret < 0) {
		intf->out_error = ret;
	} else {
		intf->out_error = 0;
		intf->out_cons = cons + ret;
		prod = intf->out_prod;
	}
	/* update the indexes, then notify the other end */
	virt_wmb();
	if (prod != cons + ret) {
		atomic_inc(&map->write);
		atomic_inc(&map->io);
	}
	notify_remote_via_irq(map->irq);

	return true;
}

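/*
 * Per-connection I/O loop, run from the map's workqueue: service reads
 * and writes while work is pending, and issue the delayed EOI for the
 * event channel once the write direction has drained.
 */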
static void pvcalls_back_ioworker(struct work_struct *work)
{
	struct pvcalls_ioworker *ioworker = container_of(work,
		struct pvcalls_ioworker, register_work);
	struct sock_mapping *map = container_of(ioworker, struct sock_mapping,
		ioworker);
	unsigned int eoi_flags = XEN_EOI_FLAG_SPURIOUS;

	while (atomic_read(&map->io) > 0) {
		if (atomic_read(&map->release) > 0) {
			atomic_set(&map->release, 0);
			return;
		}

		if (atomic_read(&map->read) > 0 &&
		    pvcalls_conn_back_read(map))
			eoi_flags = 0;
		if (atomic_read(&map->write) > 0 &&
		    pvcalls_conn_back_write(map))
			eoi_flags = 0;

		if (atomic_read(&map->eoi) > 0 && !atomic_read(&map->write)) {
			atomic_set(&map->eoi, 0);
			xen_irq_lateeoi(map->irq, eoi_flags);
			eoi_flags = XEN_EOI_FLAG_SPURIOUS;
		}

		atomic_dec(&map->io);
	}
}

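/*
 * PVCALLS_SOCKET: validate the requested family/type/protocol. Only
 * AF_INET stream sockets are supported; the actual socket allocation
 * happens at connect/bind time.
 */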
static int pvcalls_back_socket(struct xenbus_device *dev,
		struct xen_pvcalls_request *req)
{
	struct pvcalls_fedata *fedata;
	int ret;
	struct xen_pvcalls_response *rsp;

	fedata = dev_get_drvdata(&dev->dev);

	if (req->u.socket.domain != AF_INET ||
	    req->u.socket.type != SOCK_STREAM ||
	    (req->u.socket.protocol != IPPROTO_IP &&
	     req->u.socket.protocol != AF_INET))
		ret = -EAFNOSUPPORT;
	else
		ret = 0;

	/* leave the actual socket allocation for later */

	rsp = RING_GET_RESPONSE(&fedata->ring, fedata->ring.rsp_prod_pvt++);
	rsp->req_id = req->req_id;
	rsp->cmd = req->cmd;
	rsp->u.socket.id = req->u.socket.id;
	rsp->ret = ret;

	return 0;
}

static void pvcalls_sk_state_change(struct sock *sock)
{
	struct sock_mapping *map = sock->sk_user_data;

	if (map == NULL)
		return;

	atomic_inc(&map->read);
	notify_remote_via_irq(map->irq);
}

static void pvcalls_sk_data_ready(struct sock *sock)
{
	struct sock_mapping *map = sock->sk_user_data;
	struct pvcalls_ioworker *iow;

	if (map == NULL)
		return;

	iow = &map->ioworker;
	atomic_inc(&map->read);
	atomic_inc(&map->io);
	queue_work(iow->wq, &iow->register_work);
}

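/*
 * Set up an active socket: map the frontend's data ring (the header
 * page first, then the flex ring pages it references), bind the event
 * channel, and install the sk callbacks that kick the ioworker.
 */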
static struct sock_mapping *pvcalls_new_active_socket(
		struct pvcalls_fedata *fedata,
		uint64_t id,
		grant_ref_t ref,
		evtchn_port_t evtchn,
		struct socket *sock)
{
	int ret;
	struct sock_mapping *map;
	void *page;

	map = kzalloc(sizeof(*map), GFP_KERNEL);
	if (map == NULL)
		return NULL;

	map->fedata = fedata;
	map->sock = sock;
	map->id = id;
	map->ref = ref;

	ret = xenbus_map_ring_valloc(fedata->dev, &ref, 1, &page);
	if (ret < 0)
		goto out;
	map->ring = page;
	map->ring_order = map->ring->ring_order;
	/* first read the order, then map the data ring */
	virt_rmb();
	if (map->ring_order > MAX_RING_ORDER) {
		pr_warn("%s frontend requested ring_order %u, which is > MAX (%u)\n",
			__func__, map->ring_order, MAX_RING_ORDER);
		goto out;
	}
	ret = xenbus_map_ring_valloc(fedata->dev, map->ring->ref,
				     (1 << map->ring_order), &page);
	if (ret < 0)
		goto out;
	map->bytes = page;

	ret = bind_interdomain_evtchn_to_irqhandler_lateeoi(
			fedata->dev->otherend_id, evtchn,
			pvcalls_back_conn_event, 0, "pvcalls-backend", map);
	if (ret < 0)
		goto out;
	map->irq = ret;

	map->data.in = map->bytes;
	map->data.out = map->bytes + XEN_FLEX_RING_SIZE(map->ring_order);

	map->ioworker.wq = alloc_workqueue("pvcalls_io", WQ_UNBOUND, 1);
	if (!map->ioworker.wq)
		goto out;
	atomic_set(&map->io, 1);
	INIT_WORK(&map->ioworker.register_work, pvcalls_back_ioworker);

	down(&fedata->socket_lock);
	list_add_tail(&map->list, &fedata->socket_mappings);
	up(&fedata->socket_lock);

	write_lock_bh(&map->sock->sk->sk_callback_lock);
	map->saved_data_ready = map->sock->sk->sk_data_ready;
	map->sock->sk->sk_user_data = map;
	map->sock->sk->sk_data_ready = pvcalls_sk_data_ready;
	map->sock->sk->sk_state_change = pvcalls_sk_state_change;
	write_unlock_bh(&map->sock->sk->sk_callback_lock);

	return map;
out:
	/*
	 * All error paths jump here before the map has been added to
	 * socket_mappings, so only undo what was actually set up; the
	 * socket itself is released by the callers. Calling list_del()
	 * and pvcalls_back_release_active() here would corrupt a list
	 * the map was never on and free the socket a second time.
	 */
	if (map->irq > 0)
		unbind_from_irqhandler(map->irq, map);
	if (map->bytes)
		xenbus_unmap_ring_vfree(fedata->dev, map->bytes);
	if (map->ring)
		xenbus_unmap_ring_vfree(fedata->dev, (void *)map->ring);
	kfree(map);
	return NULL;
}

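/*
 * PVCALLS_CONNECT: create a kernel socket, connect it (blocking), then
 * hand it over to pvcalls_new_active_socket().
 */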
static int pvcalls_back_connect(struct xenbus_device *dev,
				struct xen_pvcalls_request *req)
{
	struct pvcalls_fedata *fedata;
	int ret = -EINVAL;
	struct socket *sock;
	struct sock_mapping *map;
	struct xen_pvcalls_response *rsp;
	struct sockaddr *sa = (struct sockaddr *)&req->u.connect.addr;

	fedata = dev_get_drvdata(&dev->dev);

	if (req->u.connect.len < sizeof(sa->sa_family) ||
	    req->u.connect.len > sizeof(req->u.connect.addr) ||
	    sa->sa_family != AF_INET)
		goto out;

	ret = sock_create(AF_INET, SOCK_STREAM, 0, &sock);
	if (ret < 0)
		goto out;
	ret = inet_stream_connect(sock, sa, req->u.connect.len, 0);
	if (ret < 0) {
		sock_release(sock);
		goto out;
	}

	map = pvcalls_new_active_socket(fedata,
					req->u.connect.id,
					req->u.connect.ref,
					req->u.connect.evtchn,
					sock);
	if (!map) {
		ret = -EFAULT;
		sock_release(sock);
	}

out:
	rsp = RING_GET_RESPONSE(&fedata->ring, fedata->ring.rsp_prod_pvt++);
	rsp->req_id = req->req_id;
	rsp->cmd = req->cmd;
	rsp->u.connect.id = req->u.connect.id;
	rsp->ret = ret;

	return 0;
}

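/*
 * Tear down an active socket: restore the saved sk callbacks, stop the
 * ioworker, unmap both rings, unbind the event channel, and finally
 * release the socket.
 */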
static int pvcalls_back_release_active(struct xenbus_device *dev,
				       struct pvcalls_fedata *fedata,
				       struct sock_mapping *map)
{
	disable_irq(map->irq);
	if (map->sock->sk != NULL) {
		write_lock_bh(&map->sock->sk->sk_callback_lock);
		map->sock->sk->sk_user_data = NULL;
		map->sock->sk->sk_data_ready = map->saved_data_ready;
		write_unlock_bh(&map->sock->sk->sk_callback_lock);
	}

	atomic_set(&map->release, 1);
	flush_work(&map->ioworker.register_work);

	xenbus_unmap_ring_vfree(dev, map->bytes);
	xenbus_unmap_ring_vfree(dev, (void *)map->ring);
	unbind_from_irqhandler(map->irq, map);

	sock_release(map->sock);
	kfree(map);

	return 0;
}

static int pvcalls_back_release_passive(struct xenbus_device *dev,
					struct pvcalls_fedata *fedata,
					struct sockpass_mapping *mappass)
{
	if (mappass->sock->sk != NULL) {
		write_lock_bh(&mappass->sock->sk->sk_callback_lock);
		mappass->sock->sk->sk_user_data = NULL;
		mappass->sock->sk->sk_data_ready = mappass->saved_data_ready;
		write_unlock_bh(&mappass->sock->sk->sk_callback_lock);
	}
	sock_release(mappass->sock);
	flush_workqueue(mappass->wq);
	destroy_workqueue(mappass->wq);
	kfree(mappass);

	return 0;
}

static int pvcalls_back_release(struct xenbus_device *dev,
				struct xen_pvcalls_request *req)
{
	struct pvcalls_fedata *fedata;
	struct sock_mapping *map, *n;
	struct sockpass_mapping *mappass;
	int ret = 0;
	struct xen_pvcalls_response *rsp;

	fedata = dev_get_drvdata(&dev->dev);

	down(&fedata->socket_lock);
	list_for_each_entry_safe(map, n, &fedata->socket_mappings, list) {
		if (map->id == req->u.release.id) {
			list_del(&map->list);
			up(&fedata->socket_lock);
			ret = pvcalls_back_release_active(dev, fedata, map);
			goto out;
		}
	}
	mappass = radix_tree_lookup(&fedata->socketpass_mappings,
				    req->u.release.id);
	if (mappass != NULL) {
		radix_tree_delete(&fedata->socketpass_mappings, mappass->id);
		up(&fedata->socket_lock);
		ret = pvcalls_back_release_passive(dev, fedata, mappass);
	} else
		up(&fedata->socket_lock);

out:
	rsp = RING_GET_RESPONSE(&fedata->ring, fedata->ring.rsp_prod_pvt++);
	rsp->req_id = req->req_id;
	rsp->u.release.id = req->u.release.id;
	rsp->cmd = req->cmd;
	rsp->ret = ret;
	return 0;
}

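/*
 * Deferred accept, run from the listening socket's workqueue once
 * data_ready has fired: accept the connection non-blocking, wrap it in
 * a new active socket, and push the response to the frontend.
 */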
static void __pvcalls_back_accept(struct work_struct *work)
{
	struct sockpass_mapping *mappass = container_of(
		work, struct sockpass_mapping, register_work);
	struct sock_mapping *map;
	struct pvcalls_ioworker *iow;
	struct pvcalls_fedata *fedata;
	struct socket *sock;
	struct xen_pvcalls_response *rsp;
	struct xen_pvcalls_request *req;
	int notify;
	int ret = -EINVAL;
	unsigned long flags;

	fedata = mappass->fedata;
	/*
	 * __pvcalls_back_accept can race against pvcalls_back_accept.
	 * We only need to check the value of "cmd" on read. It could be
	 * done atomically, but to simplify the code on the write side, we
	 * use a spinlock.
	 */
	spin_lock_irqsave(&mappass->copy_lock, flags);
	req = &mappass->reqcopy;
	if (req->cmd != PVCALLS_ACCEPT) {
		spin_unlock_irqrestore(&mappass->copy_lock, flags);
		return;
	}
	spin_unlock_irqrestore(&mappass->copy_lock, flags);

	sock = sock_alloc();
	if (sock == NULL)
		goto out_error;
	sock->type = mappass->sock->type;
	sock->ops = mappass->sock->ops;

	ret = inet_accept(mappass->sock, sock, O_NONBLOCK, true);
	if (ret == -EAGAIN) {
		sock_release(sock);
		return;
	}

	map = pvcalls_new_active_socket(fedata,
					req->u.accept.id_new,
					req->u.accept.ref,
					req->u.accept.evtchn,
					sock);
	if (!map) {
		ret = -EFAULT;
		sock_release(sock);
		goto out_error;
	}

	map->sockpass = mappass;
	iow = &map->ioworker;
	atomic_inc(&map->read);
	atomic_inc(&map->io);
	queue_work(iow->wq, &iow->register_work);

out_error:
	rsp = RING_GET_RESPONSE(&fedata->ring, fedata->ring.rsp_prod_pvt++);
	rsp->req_id = req->req_id;
	rsp->cmd = req->cmd;
	rsp->u.accept.id = req->u.accept.id;
	rsp->ret = ret;
	RING_PUSH_RESPONSES_AND_CHECK_NOTIFY(&fedata->ring, notify);
	if (notify)
		notify_remote_via_irq(fedata->irq);

	mappass->reqcopy.cmd = 0;
}

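/*
 * data_ready on a listening socket. If a POLL request is pending,
 * answer it straight from the callback; otherwise schedule the
 * deferred accept.
 */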
static void pvcalls_pass_sk_data_ready(struct sock *sock)
{
	struct sockpass_mapping *mappass = sock->sk_user_data;
	struct pvcalls_fedata *fedata;
	struct xen_pvcalls_response *rsp;
	unsigned long flags;
	int notify;

	if (mappass == NULL)
		return;

	fedata = mappass->fedata;
	spin_lock_irqsave(&mappass->copy_lock, flags);
	if (mappass->reqcopy.cmd == PVCALLS_POLL) {
		rsp = RING_GET_RESPONSE(&fedata->ring,
					fedata->ring.rsp_prod_pvt++);
		rsp->req_id = mappass->reqcopy.req_id;
		rsp->u.poll.id = mappass->reqcopy.u.poll.id;
		rsp->cmd = mappass->reqcopy.cmd;
		rsp->ret = 0;

		mappass->reqcopy.cmd = 0;
		spin_unlock_irqrestore(&mappass->copy_lock, flags);

		RING_PUSH_RESPONSES_AND_CHECK_NOTIFY(&fedata->ring, notify);
		if (notify)
			notify_remote_via_irq(mappass->fedata->irq);
	} else {
		spin_unlock_irqrestore(&mappass->copy_lock, flags);
		queue_work(mappass->wq, &mappass->register_work);
	}
}

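/*
 * PVCALLS_BIND: create the passive socket, bind it, and register it in
 * the socketpass radix tree keyed by the frontend-chosen id.
 */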
static int pvcalls_back_bind(struct xenbus_device *dev,
			     struct xen_pvcalls_request *req)
{
	struct pvcalls_fedata *fedata;
	int ret;
	struct sockpass_mapping *map;
	struct xen_pvcalls_response *rsp;

	fedata = dev_get_drvdata(&dev->dev);

	map = kzalloc(sizeof(*map), GFP_KERNEL);
	if (map == NULL) {
		ret = -ENOMEM;
		goto out;
	}

	INIT_WORK(&map->register_work, __pvcalls_back_accept);
	spin_lock_init(&map->copy_lock);
	map->wq = alloc_workqueue("pvcalls_wq", WQ_UNBOUND, 1);
	if (!map->wq) {
		ret = -ENOMEM;
		goto out;
	}

	ret = sock_create(AF_INET, SOCK_STREAM, 0, &map->sock);
	if (ret < 0)
		goto out;

	ret = inet_bind(map->sock, (struct sockaddr *)&req->u.bind.addr,
			req->u.bind.len);
	if (ret < 0)
		goto out;

	map->fedata = fedata;
	map->id = req->u.bind.id;

	down(&fedata->socket_lock);
	ret = radix_tree_insert(&fedata->socketpass_mappings, map->id,
				map);
	up(&fedata->socket_lock);
	if (ret)
		goto out;

	write_lock_bh(&map->sock->sk->sk_callback_lock);
	map->saved_data_ready = map->sock->sk->sk_data_ready;
	map->sock->sk->sk_user_data = map;
	map->sock->sk->sk_data_ready = pvcalls_pass_sk_data_ready;
	write_unlock_bh(&map->sock->sk->sk_callback_lock);

out:
	if (ret) {
		if (map && map->sock)
			sock_release(map->sock);
		if (map && map->wq)
			destroy_workqueue(map->wq);
		kfree(map);
	}
	rsp = RING_GET_RESPONSE(&fedata->ring, fedata->ring.rsp_prod_pvt++);
	rsp->req_id = req->req_id;
	rsp->cmd = req->cmd;
	rsp->u.bind.id = req->u.bind.id;
	rsp->ret = ret;
	return 0;
}

static int pvcalls_back_listen(struct xenbus_device *dev,
			       struct xen_pvcalls_request *req)
{
	struct pvcalls_fedata *fedata;
	int ret = -EINVAL;
	struct sockpass_mapping *map;
	struct xen_pvcalls_response *rsp;

	fedata = dev_get_drvdata(&dev->dev);

	down(&fedata->socket_lock);
	map = radix_tree_lookup(&fedata->socketpass_mappings, req->u.listen.id);
	up(&fedata->socket_lock);
	if (map == NULL)
		goto out;

	ret = inet_listen(map->sock, req->u.listen.backlog);

out:
	rsp = RING_GET_RESPONSE(&fedata->ring, fedata->ring.rsp_prod_pvt++);
	rsp->req_id = req->req_id;
	rsp->cmd = req->cmd;
	rsp->u.listen.id = req->u.listen.id;
	rsp->ret = ret;
	return 0;
}

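/*
 * PVCALLS_ACCEPT: stash the request in reqcopy and let the workqueue
 * do the blocking part; the response is sent by __pvcalls_back_accept(),
 * hence the -1 "no notification yet" return.
 */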
static int pvcalls_back_accept(struct xenbus_device *dev,
			       struct xen_pvcalls_request *req)
{
	struct pvcalls_fedata *fedata;
	struct sockpass_mapping *mappass;
	int ret = -EINVAL;
	struct xen_pvcalls_response *rsp;
	unsigned long flags;

	fedata = dev_get_drvdata(&dev->dev);

	down(&fedata->socket_lock);
	mappass = radix_tree_lookup(&fedata->socketpass_mappings,
		req->u.accept.id);
	up(&fedata->socket_lock);
	if (mappass == NULL)
		goto out_error;

	/*
	 * Limitation of the current implementation: only support one
	 * concurrent accept or poll call on one socket.
	 */
	spin_lock_irqsave(&mappass->copy_lock, flags);
	if (mappass->reqcopy.cmd != 0) {
		spin_unlock_irqrestore(&mappass->copy_lock, flags);
		ret = -EINTR;
		goto out_error;
	}

	mappass->reqcopy = *req;
	spin_unlock_irqrestore(&mappass->copy_lock, flags);
	queue_work(mappass->wq, &mappass->register_work);

	/* Tell the caller we don't need to send back a notification yet */
	return -1;

out_error:
	rsp = RING_GET_RESPONSE(&fedata->ring, fedata->ring.rsp_prod_pvt++);
	rsp->req_id = req->req_id;
	rsp->cmd = req->cmd;
	rsp->u.accept.id = req->u.accept.id;
	rsp->ret = ret;
	return 0;
}

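/*
 * PVCALLS_POLL: if a connection is already queued, answer immediately;
 * otherwise park the request in reqcopy and let
 * pvcalls_pass_sk_data_ready() complete it.
 */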
static int pvcalls_back_poll(struct xenbus_device *dev,
			     struct xen_pvcalls_request *req)
{
	struct pvcalls_fedata *fedata;
	struct sockpass_mapping *mappass;
	struct xen_pvcalls_response *rsp;
	struct inet_connection_sock *icsk;
	struct request_sock_queue *queue;
	unsigned long flags;
	int ret;
	bool data;

	fedata = dev_get_drvdata(&dev->dev);

	down(&fedata->socket_lock);
	mappass = radix_tree_lookup(&fedata->socketpass_mappings,
				    req->u.poll.id);
	up(&fedata->socket_lock);
	if (mappass == NULL)
		return -EINVAL;

	/*
	 * Limitation of the current implementation: only support one
	 * concurrent accept or poll call on one socket.
	 */
	spin_lock_irqsave(&mappass->copy_lock, flags);
	if (mappass->reqcopy.cmd != 0) {
		ret = -EINTR;
		goto out;
	}

	mappass->reqcopy = *req;
	icsk = inet_csk(mappass->sock->sk);
	queue = &icsk->icsk_accept_queue;
	data = READ_ONCE(queue->rskq_accept_head) != NULL;
	if (data) {
		mappass->reqcopy.cmd = 0;
		ret = 0;
		goto out;
	}
	spin_unlock_irqrestore(&mappass->copy_lock, flags);

	/* Tell the caller we don't need to send back a notification yet */
	return -1;

out:
	spin_unlock_irqrestore(&mappass->copy_lock, flags);

	rsp = RING_GET_RESPONSE(&fedata->ring, fedata->ring.rsp_prod_pvt++);
	rsp->req_id = req->req_id;
	rsp->cmd = req->cmd;
	rsp->u.poll.id = req->u.poll.id;
	rsp->ret = ret;
	return 0;
}

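/*
 * Dispatch one command ring request. A return value of -1 means the
 * handler will push its response (and notify) later.
 */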
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 810) static int pvcalls_back_handle_cmd(struct xenbus_device *dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 811) struct xen_pvcalls_request *req)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 812) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 813) int ret = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 814)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 815) switch (req->cmd) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 816) case PVCALLS_SOCKET:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 817) ret = pvcalls_back_socket(dev, req);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 818) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 819) case PVCALLS_CONNECT:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 820) ret = pvcalls_back_connect(dev, req);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 821) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 822) case PVCALLS_RELEASE:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 823) ret = pvcalls_back_release(dev, req);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 824) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 825) case PVCALLS_BIND:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 826) ret = pvcalls_back_bind(dev, req);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 827) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 828) case PVCALLS_LISTEN:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 829) ret = pvcalls_back_listen(dev, req);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 830) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 831) case PVCALLS_ACCEPT:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 832) ret = pvcalls_back_accept(dev, req);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 833) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 834) case PVCALLS_POLL:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 835) ret = pvcalls_back_poll(dev, req);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 836) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 837) default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 838) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 839) struct pvcalls_fedata *fedata;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 840) struct xen_pvcalls_response *rsp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 841)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 842) fedata = dev_get_drvdata(&dev->dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 843) rsp = RING_GET_RESPONSE(
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 844) &fedata->ring, fedata->ring.rsp_prod_pvt++);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 845) rsp->req_id = req->req_id;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 846) rsp->cmd = req->cmd;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 847) rsp->ret = -ENOTSUPP;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 848) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 849) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 850) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 851) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 852) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 853)
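/*
 * Drain the command ring: handle every unconsumed request, push the
 * queued responses and notify the frontend, looping until no more
 * requests are pending.
 */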
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 854) static void pvcalls_back_work(struct pvcalls_fedata *fedata)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 855) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 856) int notify, notify_all = 0, more = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 857) struct xen_pvcalls_request req;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 858) struct xenbus_device *dev = fedata->dev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 859)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 860) while (more) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 861) while (RING_HAS_UNCONSUMED_REQUESTS(&fedata->ring)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 862) RING_COPY_REQUEST(&fedata->ring,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 863) fedata->ring.req_cons++,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 864) &req);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 865)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 866) if (!pvcalls_back_handle_cmd(dev, &req)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 867) RING_PUSH_RESPONSES_AND_CHECK_NOTIFY(
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 868) &fedata->ring, notify);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 869) notify_all += notify;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 870) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 871) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 872)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 873) if (notify_all) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 874) notify_remote_via_irq(fedata->irq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 875) notify_all = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 876) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 877)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 878) RING_FINAL_CHECK_FOR_REQUESTS(&fedata->ring, more);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 879) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 880) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 881)
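/*
 * Interrupt handler for the command ring event channel. The interrupt
 * is EOI'd as spurious unless a frontend is actually bound to the
 * device and its ring has been processed.
 */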
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 882) static irqreturn_t pvcalls_back_event(int irq, void *dev_id)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 883) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 884) struct xenbus_device *dev = dev_id;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 885) struct pvcalls_fedata *fedata = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 886) unsigned int eoi_flags = XEN_EOI_FLAG_SPURIOUS;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 887)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 888) if (dev) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 889) fedata = dev_get_drvdata(&dev->dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 890) if (fedata) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 891) pvcalls_back_work(fedata);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 892) eoi_flags = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 893) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 894) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 895)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 896) xen_irq_lateeoi(irq, eoi_flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 897)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 898) return IRQ_HANDLED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 899) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 900)
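/*
 * Interrupt handler for the event channel of an active socket: kick
 * the ioworker, which performs the actual I/O and issues the delayed
 * EOI. If the mapping is already gone, EOI immediately.
 */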
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 901) static irqreturn_t pvcalls_back_conn_event(int irq, void *sock_map)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 902) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 903) struct sock_mapping *map = sock_map;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 904) struct pvcalls_ioworker *iow;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 905)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 906) if (map == NULL || map->sock == NULL || map->sock->sk == NULL ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 907) map->sock->sk->sk_user_data != map) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 908) xen_irq_lateeoi(irq, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 909) return IRQ_HANDLED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 910) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 911)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 912) iow = &map->ioworker;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 913)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 914) atomic_inc(&map->write);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 915) atomic_inc(&map->eoi);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 916) atomic_inc(&map->io);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 917) queue_work(iow->wq, &iow->register_work);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 918)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 919) return IRQ_HANDLED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 920) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 921)
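/*
 * Connect to a new frontend: read the event channel port and ring
 * reference from xenstore, bind the interrupt, map the command ring
 * and add the frontend to the global list.
 */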
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 922) static int backend_connect(struct xenbus_device *dev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 923) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 924) int err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 925) evtchn_port_t evtchn;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 926) grant_ref_t ring_ref;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 927) struct pvcalls_fedata *fedata = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 928)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 929) fedata = kzalloc(sizeof(struct pvcalls_fedata), GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 930) if (!fedata)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 931) return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 932)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 933) fedata->irq = -1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 934) 	err = xenbus_scanf(XBT_NIL, dev->otherend, "port", "%u", &evtchn);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 936) if (err != 1) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 937) err = -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 938) 		xenbus_dev_fatal(dev, err, "reading %s/port",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 939) dev->otherend);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 940) goto error;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 941) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 942)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 943) err = xenbus_scanf(XBT_NIL, dev->otherend, "ring-ref", "%u", &ring_ref);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 944) if (err != 1) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 945) err = -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 946) xenbus_dev_fatal(dev, err, "reading %s/ring-ref",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 947) dev->otherend);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 948) goto error;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 949) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 950)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 951) err = bind_interdomain_evtchn_to_irq_lateeoi(dev->otherend_id, evtchn);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 952) if (err < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 953) goto error;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 954) fedata->irq = err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 955)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 956) err = request_threaded_irq(fedata->irq, NULL, pvcalls_back_event,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 957) IRQF_ONESHOT, "pvcalls-back", dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 958) if (err < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 959) goto error;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 960)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 961) err = xenbus_map_ring_valloc(dev, &ring_ref, 1,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 962) (void **)&fedata->sring);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 963) if (err < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 964) goto error;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 965)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 966) BACK_RING_INIT(&fedata->ring, fedata->sring, XEN_PAGE_SIZE * 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 967) fedata->dev = dev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 968)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 969) INIT_LIST_HEAD(&fedata->socket_mappings);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 970) INIT_RADIX_TREE(&fedata->socketpass_mappings, GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 971) sema_init(&fedata->socket_lock, 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 972) dev_set_drvdata(&dev->dev, fedata);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 973)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 974) down(&pvcalls_back_global.frontends_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 975) list_add_tail(&fedata->list, &pvcalls_back_global.frontends);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 976) up(&pvcalls_back_global.frontends_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 977)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 978) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 979)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 980) error:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 981) if (fedata->irq >= 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 982) unbind_from_irqhandler(fedata->irq, dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 983) if (fedata->sring != NULL)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 984) xenbus_unmap_ring_vfree(dev, fedata->sring);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 985) kfree(fedata);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 986) return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 987) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 988)
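/*
 * Release all active and passive sockets belonging to a frontend,
 * then unbind its interrupt, unmap the command ring and free the
 * per-frontend data.
 */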
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 989) static int backend_disconnect(struct xenbus_device *dev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 990) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 991) struct pvcalls_fedata *fedata;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 992) struct sock_mapping *map, *n;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 993) struct sockpass_mapping *mappass;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 994) struct radix_tree_iter iter;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 995) void **slot;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 996)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 998) fedata = dev_get_drvdata(&dev->dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 999)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1000) down(&fedata->socket_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1001) list_for_each_entry_safe(map, n, &fedata->socket_mappings, list) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1002) list_del(&map->list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1003) pvcalls_back_release_active(dev, fedata, map);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1004) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1005)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1006) radix_tree_for_each_slot(slot, &fedata->socketpass_mappings, &iter, 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1007) mappass = radix_tree_deref_slot(slot);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1008) if (!mappass)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1009) continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1010) if (radix_tree_exception(mappass)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1011) if (radix_tree_deref_retry(mappass))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1012) slot = radix_tree_iter_retry(&iter);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1013) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1014) radix_tree_delete(&fedata->socketpass_mappings,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1015) mappass->id);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1016) pvcalls_back_release_passive(dev, fedata, mappass);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1017) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1018) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1019) up(&fedata->socket_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1020)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1021) unbind_from_irqhandler(fedata->irq, dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1022) xenbus_unmap_ring_vfree(dev, fedata->sring);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1023)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1024) list_del(&fedata->list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1025) kfree(fedata);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1026) dev_set_drvdata(&dev->dev, NULL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1027)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1028) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1029) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1030)
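/*
 * Advertise the supported protocol versions, the maximum ring page
 * order and the implemented function calls under the backend's
 * xenstore nodename (e.g. backend/pvcalls/<domid>/<devid>/versions;
 * path shown for illustration only), then switch to InitWait and
 * wait for a frontend to connect.
 */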
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1031) static int pvcalls_back_probe(struct xenbus_device *dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1032) const struct xenbus_device_id *id)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1033) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1034) int err, abort;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1035) struct xenbus_transaction xbt;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1036)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1037) again:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1038) abort = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1039)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1040) err = xenbus_transaction_start(&xbt);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1041) if (err) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1042) pr_warn("%s cannot create xenstore transaction\n", __func__);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1043) return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1044) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1045)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1046) err = xenbus_printf(xbt, dev->nodename, "versions", "%s",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1047) PVCALLS_VERSIONS);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1048) if (err) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1049) pr_warn("%s write out 'versions' failed\n", __func__);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1050) goto abort;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1051) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1052)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1053) err = xenbus_printf(xbt, dev->nodename, "max-page-order", "%u",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1054) MAX_RING_ORDER);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1055) if (err) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1056) pr_warn("%s write out 'max-page-order' failed\n", __func__);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1057) goto abort;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1058) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1059)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1060) err = xenbus_printf(xbt, dev->nodename, "function-calls",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1061) XENBUS_FUNCTIONS_CALLS);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1062) if (err) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1063) pr_warn("%s write out 'function-calls' failed\n", __func__);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1064) goto abort;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1065) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1066)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1067) abort = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1068) abort:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1069) err = xenbus_transaction_end(xbt, abort);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1070) if (err) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1071) if (err == -EAGAIN && !abort)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1072) goto again;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1073) pr_warn("%s cannot complete xenstore transaction\n", __func__);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1074) return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1075) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1076)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1077) if (abort)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1078) return -EFAULT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1079)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1080) xenbus_switch_state(dev, XenbusStateInitWait);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1081)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1082) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1083) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1084)
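/*
 * Walk the backend through the xenbus state machine, one valid
 * transition at a time, until the requested state is reached.
 * Transitions that make no sense trigger a warning.
 */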
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1085) static void set_backend_state(struct xenbus_device *dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1086) enum xenbus_state state)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1087) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1088) while (dev->state != state) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1089) switch (dev->state) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1090) case XenbusStateClosed:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1091) switch (state) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1092) case XenbusStateInitWait:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1093) case XenbusStateConnected:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1094) xenbus_switch_state(dev, XenbusStateInitWait);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1095) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1096) case XenbusStateClosing:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1097) xenbus_switch_state(dev, XenbusStateClosing);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1098) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1099) default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1100) WARN_ON(1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1101) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1102) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1103) case XenbusStateInitWait:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1104) case XenbusStateInitialised:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1105) switch (state) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1106) case XenbusStateConnected:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1107) if (backend_connect(dev))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1108) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1109) xenbus_switch_state(dev, XenbusStateConnected);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1110) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1111) case XenbusStateClosing:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1112) case XenbusStateClosed:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1113) xenbus_switch_state(dev, XenbusStateClosing);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1114) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1115) default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1116) WARN_ON(1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1117) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1118) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1119) case XenbusStateConnected:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1120) switch (state) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1121) case XenbusStateInitWait:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1122) case XenbusStateClosing:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1123) case XenbusStateClosed:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1124) down(&pvcalls_back_global.frontends_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1125) backend_disconnect(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1126) up(&pvcalls_back_global.frontends_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1127) xenbus_switch_state(dev, XenbusStateClosing);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1128) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1129) default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1130) WARN_ON(1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1131) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1132) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1133) case XenbusStateClosing:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1134) switch (state) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1135) case XenbusStateInitWait:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1136) case XenbusStateConnected:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1137) case XenbusStateClosed:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1138) xenbus_switch_state(dev, XenbusStateClosed);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1139) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1140) default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1141) WARN_ON(1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1142) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1143) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1144) default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1145) WARN_ON(1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1146) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1147) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1148) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1149)
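/* React to a state change of the frontend. */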
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1150) static void pvcalls_back_changed(struct xenbus_device *dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1151) enum xenbus_state frontend_state)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1152) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1153) switch (frontend_state) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1154) case XenbusStateInitialising:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1155) set_backend_state(dev, XenbusStateInitWait);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1156) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1157)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1158) case XenbusStateInitialised:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1159) case XenbusStateConnected:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1160) set_backend_state(dev, XenbusStateConnected);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1161) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1162)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1163) case XenbusStateClosing:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1164) set_backend_state(dev, XenbusStateClosing);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1165) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1166)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1167) case XenbusStateClosed:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1168) set_backend_state(dev, XenbusStateClosed);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1169) if (xenbus_dev_is_online(dev))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1170) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1171) device_unregister(&dev->dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1172) 		break;

^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1173) case XenbusStateUnknown:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1174) set_backend_state(dev, XenbusStateClosed);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1175) device_unregister(&dev->dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1176) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1177)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1178) default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1179) xenbus_dev_fatal(dev, -EINVAL, "saw state %d at frontend",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1180) frontend_state);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1181) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1182) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1183) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1184)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1185) static int pvcalls_back_remove(struct xenbus_device *dev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1186) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1187) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1188) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1189)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1190) static int pvcalls_back_uevent(struct xenbus_device *xdev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1191) struct kobj_uevent_env *env)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1192) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1193) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1194) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1195)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1196) static const struct xenbus_device_id pvcalls_back_ids[] = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1197) { "pvcalls" },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1198) { "" }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1199) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1200)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1201) static struct xenbus_driver pvcalls_back_driver = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1202) .ids = pvcalls_back_ids,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1203) .probe = pvcalls_back_probe,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1204) .remove = pvcalls_back_remove,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1205) .uevent = pvcalls_back_uevent,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1206) .otherend_changed = pvcalls_back_changed,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1207) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1208)
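/* Register the backend and initialize the global frontend list. */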
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1209) static int __init pvcalls_back_init(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1210) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1211) int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1212)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1213) if (!xen_domain())
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1214) return -ENODEV;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1215)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1216) ret = xenbus_register_backend(&pvcalls_back_driver);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1217) if (ret < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1218) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1219)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1220) sema_init(&pvcalls_back_global.frontends_lock, 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1221) INIT_LIST_HEAD(&pvcalls_back_global.frontends);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1222) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1223) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1224) module_init(pvcalls_back_init);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1225)
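/* Disconnect any remaining frontends before unregistering the driver. */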
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1226) static void __exit pvcalls_back_fin(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1227) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1228) struct pvcalls_fedata *fedata, *nfedata;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1229)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1230) down(&pvcalls_back_global.frontends_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1231) list_for_each_entry_safe(fedata, nfedata,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1232) &pvcalls_back_global.frontends, list) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1233) backend_disconnect(fedata->dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1234) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1235) up(&pvcalls_back_global.frontends_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1236)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1237) xenbus_unregister_driver(&pvcalls_back_driver);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1238) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1239)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1240) module_exit(pvcalls_back_fin);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1241)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1242) MODULE_DESCRIPTION("Xen PV Calls backend driver");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1243) MODULE_AUTHOR("Stefano Stabellini <sstabellini@kernel.org>");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1244) MODULE_LICENSE("GPL");