// SPDX-License-Identifier: GPL-2.0-only
/*
 * The Virtio 9p transport driver
 *
 * This is a block based transport driver based on the lguest block driver
 * code.
 *
 * Copyright (C) 2007, 2008 Eric Van Hensbergen, IBM Corporation
 *
 * Based on virtio console driver
 * Copyright (C) 2006, 2007 Rusty Russell, IBM Corporation
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/in.h>
#include <linux/module.h>
#include <linux/net.h>
#include <linux/ipv6.h>
#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/un.h>
#include <linux/uaccess.h>
#include <linux/inet.h>
#include <linux/idr.h>
#include <linux/file.h>
#include <linux/highmem.h>
#include <linux/slab.h>
#include <net/9p/9p.h>
#include <linux/parser.h>
#include <net/9p/client.h>
#include <net/9p/transport.h>
#include <linux/scatterlist.h>
#include <linux/swap.h>
#include <linux/virtio.h>
#include <linux/virtio_9p.h>
#include "trans_common.h"

#define VIRTQUEUE_NUM	128

/* a single mutex to manage channel initialization and attachment */
static DEFINE_MUTEX(virtio_9p_lock);
static DECLARE_WAIT_QUEUE_HEAD(vp_wq);
static atomic_t vp_pinned = ATOMIC_INIT(0);

/**
 * struct virtio_chan - per-instance transport information
 * @inuse: whether the channel is in use
 * @lock: protects multiple elements within this structure
 * @client: client instance
 * @vdev: virtio dev associated with this channel
 * @vq: virtio queue associated with this channel
 * @sg: scatter gather list which is used to pack a request (protected?)
 * @ring_bufs_avail: flag indicating that ring buffers are available
 * @vc_wq: wait queue for threads waiting for ring buffers
 * @p9_max_pages: maximum number of pinned pages (global limit)
 * @tag: name used to identify a mount, null-terminated
 * @chan_list: linkage into the global list of channels
 *
 * We keep all per-channel information in a structure.
 * This structure is allocated within the device's dev->mem space.
 * A pointer to the structure is stored in the transport private data.
 *
 */

struct virtio_chan {
	bool inuse;

	spinlock_t lock;

	struct p9_client *client;
	struct virtio_device *vdev;
	struct virtqueue *vq;
	int ring_bufs_avail;
	wait_queue_head_t *vc_wq;
	/* This is a global limit. Since we don't have a global structure,
	 * we place it in each channel.
	 */
	unsigned long p9_max_pages;
	/* Scatterlist: can be too big for stack. */
	struct scatterlist sg[VIRTQUEUE_NUM];
	/*
	 * tag name used to identify a mount, null-terminated
	 */
	char *tag;

	struct list_head chan_list;
};

static struct list_head virtio_chan_list;

/* How many bytes are left in this page. */
static unsigned int rest_of_page(void *data)
{
	return PAGE_SIZE - offset_in_page(data);
}

/**
 * p9_virtio_close - reclaim resources of a channel
 * @client: client instance
 *
 * This reclaims a channel by freeing its resources and
 * resetting its inuse flag.
 *
 */

static void p9_virtio_close(struct p9_client *client)
{
	struct virtio_chan *chan = client->trans;

	mutex_lock(&virtio_9p_lock);
	if (chan)
		chan->inuse = false;
	mutex_unlock(&virtio_9p_lock);
}

/**
 * req_done - callback which signals activity from the server
 * @vq: virtio queue activity was received on
 *
 * This notifies us that the server has triggered some activity
 * on the virtio channel - most likely a response to a request we
 * sent. Figure out which requests now have responses and wake up
 * those threads.
 *
 * Bugs: could do with some additional sanity checking, but appears to work.
 *
 */

static void req_done(struct virtqueue *vq)
{
	struct virtio_chan *chan = vq->vdev->priv;
	unsigned int len;
	struct p9_req_t *req;
	bool need_wakeup = false;
	unsigned long flags;

	p9_debug(P9_DEBUG_TRANS, ": request done\n");

	spin_lock_irqsave(&chan->lock, flags);
	while ((req = virtqueue_get_buf(chan->vq, &len)) != NULL) {
		if (!chan->ring_bufs_avail) {
			chan->ring_bufs_avail = 1;
			need_wakeup = true;
		}

		if (len) {
			req->rc.size = len;
			p9_client_cb(chan->client, req, REQ_STATUS_RCVD);
		}
	}
	spin_unlock_irqrestore(&chan->lock, flags);
	/* Wakeup if anyone waiting for VirtIO ring space. */
	if (need_wakeup)
		wake_up(chan->vc_wq);
}

/**
 * pack_sg_list - pack a scatter gather list from a linear buffer
 * @sg: scatter/gather list to pack into
 * @start: which segment of the sg_list to start at
 * @limit: maximum segment to pack data to
 * @data: data to pack into scatter/gather list
 * @count: amount of data to pack into the scatter/gather list
 *
 * sg_lists have multiple segments of various sizes. This will pack
 * arbitrary data into an existing scatter gather list, segmenting the
 * data as necessary within constraints.
 *
 */

static int pack_sg_list(struct scatterlist *sg, int start,
			int limit, char *data, int count)
{
	int s;
	int index = start;

	while (count) {
		s = rest_of_page(data);
		if (s > count)
			s = count;
		BUG_ON(index >= limit);
		/* Make sure we don't terminate early. */
		sg_unmark_end(&sg[index]);
		sg_set_buf(&sg[index++], data, s);
		count -= s;
		data += s;
	}
	if (index-start)
		sg_mark_end(&sg[index - 1]);
	return index-start;
}
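
/*
 * Illustrative example (commentary only, not part of the driver logic):
 * with 4 KiB pages, packing a 6000-byte buffer that starts 3000 bytes into
 * a page yields three segments: 1096 bytes (rest of the first page),
 * 4096 bytes (one full page) and 808 bytes. pack_sg_list() then returns 3
 * and marks the last of those segments as the end of the list.
 */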

/* We don't currently allow canceling of virtio requests */
static int p9_virtio_cancel(struct p9_client *client, struct p9_req_t *req)
{
	return 1;
}

/* Reply won't come, so drop req ref */
static int p9_virtio_cancelled(struct p9_client *client, struct p9_req_t *req)
{
	p9_req_put(req);
	return 0;
}

/**
 * pack_sg_list_p - Just like pack_sg_list. Instead of taking a buffer,
 * this takes a list of pages.
 * @sg: scatter/gather list to pack into
 * @start: which segment of the sg_list to start at
 * @limit: maximum number of segments in the scatter/gather list
 * @pdata: a list of pages to add into sg.
 * @nr_pages: number of pages to pack into the scatter/gather list
 * @offs: amount of data in the beginning of first page _not_ to pack
 * @count: amount of data to pack into the scatter/gather list
 */
static int
pack_sg_list_p(struct scatterlist *sg, int start, int limit,
	       struct page **pdata, int nr_pages, size_t offs, int count)
{
	int i = 0, s;
	int data_off = offs;
	int index = start;

	BUG_ON(nr_pages > (limit - start));
	/*
	 * if the first page doesn't start at a
	 * page boundary, find the offset
	 */
	while (nr_pages) {
		s = PAGE_SIZE - data_off;
		if (s > count)
			s = count;
		BUG_ON(index >= limit);
		/* Make sure we don't terminate early. */
		sg_unmark_end(&sg[index]);
		sg_set_page(&sg[index++], pdata[i++], s, data_off);
		data_off = 0;
		count -= s;
		nr_pages--;
	}

	if (index-start)
		sg_mark_end(&sg[index - 1]);
	return index - start;
}

/**
 * p9_virtio_request - issue a request
 * @client: client instance issuing the request
 * @req: request to be issued
 *
 */

static int
p9_virtio_request(struct p9_client *client, struct p9_req_t *req)
{
	int err;
	int in, out, out_sgs, in_sgs;
	unsigned long flags;
	struct virtio_chan *chan = client->trans;
	struct scatterlist *sgs[2];

	p9_debug(P9_DEBUG_TRANS, "9p debug: virtio request\n");

	req->status = REQ_STATUS_SENT;
req_retry:
	spin_lock_irqsave(&chan->lock, flags);

	out_sgs = in_sgs = 0;
	/* Handle out VirtIO ring buffers */
	out = pack_sg_list(chan->sg, 0,
			   VIRTQUEUE_NUM, req->tc.sdata, req->tc.size);
	if (out)
		sgs[out_sgs++] = chan->sg;

	in = pack_sg_list(chan->sg, out,
			  VIRTQUEUE_NUM, req->rc.sdata, req->rc.capacity);
	if (in)
		sgs[out_sgs + in_sgs++] = chan->sg + out;

	err = virtqueue_add_sgs(chan->vq, sgs, out_sgs, in_sgs, req,
				GFP_ATOMIC);
	if (err < 0) {
		if (err == -ENOSPC) {
			chan->ring_bufs_avail = 0;
			spin_unlock_irqrestore(&chan->lock, flags);
			err = wait_event_killable(*chan->vc_wq,
						  chan->ring_bufs_avail);
			if (err == -ERESTARTSYS)
				return err;

			p9_debug(P9_DEBUG_TRANS, "Retry virtio request\n");
			goto req_retry;
		} else {
			spin_unlock_irqrestore(&chan->lock, flags);
			p9_debug(P9_DEBUG_TRANS,
				 "virtio rpc add_sgs returned failure\n");
			return -EIO;
		}
	}
	virtqueue_kick(chan->vq);
	spin_unlock_irqrestore(&chan->lock, flags);

	p9_debug(P9_DEBUG_TRANS, "virtio request kicked\n");
	return 0;
}
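
/*
 * A minimal sketch (illustrative only) of what p9_virtio_request() hands to
 * virtqueue_add_sgs(): at most two scatterlist groups, the driver-readable
 * request followed by the device-writable response space.
 *
 *	sgs[0] = chan->sg		(out: req->tc.sdata, req->tc.size)
 *	sgs[1] = chan->sg + out		(in:  req->rc.sdata, req->rc.capacity)
 *
 * virtqueue_add_sgs(chan->vq, sgs, out_sgs, in_sgs, req, GFP_ATOMIC) then
 * queues both groups as a single request token (req).
 */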

static int p9_get_mapped_pages(struct virtio_chan *chan,
			       struct page ***pages,
			       struct iov_iter *data,
			       int count,
			       size_t *offs,
			       int *need_drop)
{
	int nr_pages;
	int err;

	if (!iov_iter_count(data))
		return 0;

	if (!iov_iter_is_kvec(data)) {
		int n;
		/*
		 * We allow only p9_max_pages pinned. We wait here for
		 * other zc requests to finish.
		 */
		if (atomic_read(&vp_pinned) >= chan->p9_max_pages) {
			err = wait_event_killable(vp_wq,
						  (atomic_read(&vp_pinned) < chan->p9_max_pages));
			if (err == -ERESTARTSYS)
				return err;
		}
		n = iov_iter_get_pages_alloc(data, pages, count, offs);
		if (n < 0)
			return n;
		*need_drop = 1;
		nr_pages = DIV_ROUND_UP(n + *offs, PAGE_SIZE);
		atomic_add(nr_pages, &vp_pinned);
		return n;
	} else {
		/* kernel buffer, no need to pin pages */
		int index;
		size_t len;
		void *p;

		/* we'd already checked that it's non-empty */
		while (1) {
			len = iov_iter_single_seg_count(data);
			if (likely(len)) {
				p = data->kvec->iov_base + data->iov_offset;
				break;
			}
			iov_iter_advance(data, 0);
		}
		if (len > count)
			len = count;

		nr_pages = DIV_ROUND_UP((unsigned long)p + len, PAGE_SIZE) -
			   (unsigned long)p / PAGE_SIZE;

		*pages = kmalloc_array(nr_pages, sizeof(struct page *),
				       GFP_NOFS);
		if (!*pages)
			return -ENOMEM;

		*need_drop = 0;
		p -= (*offs = offset_in_page(p));
		for (index = 0; index < nr_pages; index++) {
			if (is_vmalloc_addr(p))
				(*pages)[index] = vmalloc_to_page(p);
			else
				(*pages)[index] = kmap_to_page(p);
			p += PAGE_SIZE;
		}
		return len;
	}
}
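
/*
 * In short (commentary, not code used by the driver): for user-space iters
 * p9_get_mapped_pages() pins pages and asks the caller to drop them later
 * (*need_drop = 1); for kvec iters it only translates kernel addresses to
 * struct page pointers, so nothing needs unpinning (*need_drop = 0).
 */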

/**
 * p9_virtio_zc_request - issue a zero copy request
 * @client: client instance issuing the request
 * @req: request to be issued
 * @uidata: user buffer that should be used for zero copy read
 * @uodata: user buffer that should be used for zero copy write
 * @inlen: read buffer size
 * @outlen: write buffer size
 * @in_hdr_len: read header size; this is the size of the response protocol data
 *
 */
static int
p9_virtio_zc_request(struct p9_client *client, struct p9_req_t *req,
		     struct iov_iter *uidata, struct iov_iter *uodata,
		     int inlen, int outlen, int in_hdr_len)
{
	int in, out, err, out_sgs, in_sgs;
	unsigned long flags;
	int in_nr_pages = 0, out_nr_pages = 0;
	struct page **in_pages = NULL, **out_pages = NULL;
	struct virtio_chan *chan = client->trans;
	struct scatterlist *sgs[4];
	size_t offs;
	int need_drop = 0;
	int kicked = 0;

	p9_debug(P9_DEBUG_TRANS, "virtio request\n");

	if (uodata) {
		__le32 sz;
		int n = p9_get_mapped_pages(chan, &out_pages, uodata,
					    outlen, &offs, &need_drop);
		if (n < 0) {
			err = n;
			goto err_out;
		}
		out_nr_pages = DIV_ROUND_UP(n + offs, PAGE_SIZE);
		if (n != outlen) {
			__le32 v = cpu_to_le32(n);
			memcpy(&req->tc.sdata[req->tc.size - 4], &v, 4);
			outlen = n;
		}
		/* The size field of the message must include the length of the
		 * header and the length of the data. We didn't actually know
		 * the length of the data until this point so add it in now.
		 */
		sz = cpu_to_le32(req->tc.size + outlen);
		memcpy(&req->tc.sdata[0], &sz, sizeof(sz));
	} else if (uidata) {
		int n = p9_get_mapped_pages(chan, &in_pages, uidata,
					    inlen, &offs, &need_drop);
		if (n < 0) {
			err = n;
			goto err_out;
		}
		in_nr_pages = DIV_ROUND_UP(n + offs, PAGE_SIZE);
		if (n != inlen) {
			__le32 v = cpu_to_le32(n);
			memcpy(&req->tc.sdata[req->tc.size - 4], &v, 4);
			inlen = n;
		}
	}
	req->status = REQ_STATUS_SENT;
req_retry_pinned:
	spin_lock_irqsave(&chan->lock, flags);

	out_sgs = in_sgs = 0;

	/* out data */
	out = pack_sg_list(chan->sg, 0,
			   VIRTQUEUE_NUM, req->tc.sdata, req->tc.size);

	if (out)
		sgs[out_sgs++] = chan->sg;

	if (out_pages) {
		sgs[out_sgs++] = chan->sg + out;
		out += pack_sg_list_p(chan->sg, out, VIRTQUEUE_NUM,
				      out_pages, out_nr_pages, offs, outlen);
	}

	/*
	 * Take care of in data.
	 * For example, TREAD has an in_hdr_len of 11:
	 * 11 is the read/write header = PDU header (7) + IO size (4).
	 * Arrange things so that the server places the header in the
	 * allocated memory and the payload onto the user buffer.
	 */
	in = pack_sg_list(chan->sg, out,
			  VIRTQUEUE_NUM, req->rc.sdata, in_hdr_len);
	if (in)
		sgs[out_sgs + in_sgs++] = chan->sg + out;

	if (in_pages) {
		sgs[out_sgs + in_sgs++] = chan->sg + out + in;
		in += pack_sg_list_p(chan->sg, out + in, VIRTQUEUE_NUM,
				     in_pages, in_nr_pages, offs, inlen);
	}

	BUG_ON(out_sgs + in_sgs > ARRAY_SIZE(sgs));
	err = virtqueue_add_sgs(chan->vq, sgs, out_sgs, in_sgs, req,
				GFP_ATOMIC);
	if (err < 0) {
		if (err == -ENOSPC) {
			chan->ring_bufs_avail = 0;
			spin_unlock_irqrestore(&chan->lock, flags);
			err = wait_event_killable(*chan->vc_wq,
						  chan->ring_bufs_avail);
			if (err == -ERESTARTSYS)
				goto err_out;

			p9_debug(P9_DEBUG_TRANS, "Retry virtio request\n");
			goto req_retry_pinned;
		} else {
			spin_unlock_irqrestore(&chan->lock, flags);
			p9_debug(P9_DEBUG_TRANS,
				 "virtio rpc add_sgs returned failure\n");
			err = -EIO;
			goto err_out;
		}
	}
	virtqueue_kick(chan->vq);
	spin_unlock_irqrestore(&chan->lock, flags);
	kicked = 1;
	p9_debug(P9_DEBUG_TRANS, "virtio request kicked\n");
	err = wait_event_killable(req->wq, req->status >= REQ_STATUS_RCVD);
	/*
	 * Non-kernel buffers are pinned, unpin them
	 */
err_out:
	if (need_drop) {
		if (in_pages) {
			p9_release_pages(in_pages, in_nr_pages);
			atomic_sub(in_nr_pages, &vp_pinned);
		}
		if (out_pages) {
			p9_release_pages(out_pages, out_nr_pages);
			atomic_sub(out_nr_pages, &vp_pinned);
		}
		/* wakeup anybody waiting for slots to pin pages */
		wake_up(&vp_wq);
	}
	kvfree(in_pages);
	kvfree(out_pages);
	if (!kicked) {
		/* reply won't come */
		p9_req_put(req);
	}
	return err;
}
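
/*
 * Illustrative layout (commentary only) of the scatterlist groups a
 * zero-copy TREAD builds before virtqueue_add_sgs(): one out group for the
 * request PDU, then up to three more groups depending on the direction.
 *
 *	sgs[0] = chan->sg		(out: request header, req->tc.sdata)
 *	sgs[1] = chan->sg + out		(in:  11-byte reply header in rc.sdata)
 *	sgs[2] = chan->sg + out + in	(in:  pinned user pages for the payload)
 *
 * A zero-copy TWRITE instead adds its pinned user pages as a second out
 * group and only needs one in group for the reply.
 */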

static ssize_t p9_mount_tag_show(struct device *dev,
				 struct device_attribute *attr, char *buf)
{
	struct virtio_chan *chan;
	struct virtio_device *vdev;
	int tag_len;

	vdev = dev_to_virtio(dev);
	chan = vdev->priv;
	tag_len = strlen(chan->tag);

	memcpy(buf, chan->tag, tag_len + 1);

	return tag_len + 1;
}
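
/*
 * For illustration only: userspace can discover the tag through the
 * mount_tag sysfs attribute defined just below, e.g. a (hypothetical)
 * device path such as /sys/bus/virtio/devices/virtio0/mount_tag, which is
 * what udev rules and mount helpers typically read.
 */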

static DEVICE_ATTR(mount_tag, 0444, p9_mount_tag_show, NULL);

/**
 * p9_virtio_probe - probe for existence of 9P virtio channels
 * @vdev: virtio device to probe
 *
 * This probes for existing virtio channels.
 *
 */

static int p9_virtio_probe(struct virtio_device *vdev)
{
	__u16 tag_len;
	char *tag;
	int err;
	struct virtio_chan *chan;

	if (!vdev->config->get) {
		dev_err(&vdev->dev, "%s failure: config access disabled\n",
			__func__);
		return -EINVAL;
	}

	chan = kmalloc(sizeof(struct virtio_chan), GFP_KERNEL);
	if (!chan) {
		pr_err("Failed to allocate virtio 9P channel\n");
		err = -ENOMEM;
		goto fail;
	}

	chan->vdev = vdev;

	/* We expect one virtqueue, for requests. */
	chan->vq = virtio_find_single_vq(vdev, req_done, "requests");
	if (IS_ERR(chan->vq)) {
		err = PTR_ERR(chan->vq);
		goto out_free_chan;
	}
	chan->vq->vdev->priv = chan;
	spin_lock_init(&chan->lock);

	sg_init_table(chan->sg, VIRTQUEUE_NUM);

	chan->inuse = false;
	if (virtio_has_feature(vdev, VIRTIO_9P_MOUNT_TAG)) {
		virtio_cread(vdev, struct virtio_9p_config, tag_len, &tag_len);
	} else {
		err = -EINVAL;
		goto out_free_vq;
	}
	tag = kzalloc(tag_len + 1, GFP_KERNEL);
	if (!tag) {
		err = -ENOMEM;
		goto out_free_vq;
	}

	virtio_cread_bytes(vdev, offsetof(struct virtio_9p_config, tag),
			   tag, tag_len);
	chan->tag = tag;
	err = sysfs_create_file(&(vdev->dev.kobj), &dev_attr_mount_tag.attr);
	if (err) {
		goto out_free_tag;
	}
	chan->vc_wq = kmalloc(sizeof(wait_queue_head_t), GFP_KERNEL);
	if (!chan->vc_wq) {
		err = -ENOMEM;
		goto out_remove_file;
	}
	init_waitqueue_head(chan->vc_wq);
	chan->ring_bufs_avail = 1;
	/* Ceiling limit to avoid denial of service attacks */
	chan->p9_max_pages = nr_free_buffer_pages()/4;

	virtio_device_ready(vdev);

	mutex_lock(&virtio_9p_lock);
	list_add_tail(&chan->chan_list, &virtio_chan_list);
	mutex_unlock(&virtio_9p_lock);

	/* Let udev rules use the new mount_tag attribute. */
	kobject_uevent(&(vdev->dev.kobj), KOBJ_CHANGE);

	return 0;

out_remove_file:
	sysfs_remove_file(&vdev->dev.kobj, &dev_attr_mount_tag.attr);
out_free_tag:
	kfree(tag);
out_free_vq:
	vdev->config->del_vqs(vdev);
out_free_chan:
	kfree(chan);
fail:
	return err;
}


/**
 * p9_virtio_create - allocate a new virtio channel
 * @client: client instance invoking this transport
 * @devname: string identifying the channel to connect to
 * @args: args passed from sys_mount() for per-transport options (unused)
 *
 * This sets up a transport channel for 9p communication. Right now
 * we only match the first available channel, but eventually we could look up
 * alternate channels by matching devname versus a virtio_config entry.
 * We use a simple reference count mechanism to ensure that only a single
 * mount has a channel open at a time.
 *
 */

static int
p9_virtio_create(struct p9_client *client, const char *devname, char *args)
{
	struct virtio_chan *chan;
	int ret = -ENOENT;
	int found = 0;

	if (devname == NULL)
		return -EINVAL;

	mutex_lock(&virtio_9p_lock);
	list_for_each_entry(chan, &virtio_chan_list, chan_list) {
		if (!strcmp(devname, chan->tag)) {
			if (!chan->inuse) {
				chan->inuse = true;
				found = 1;
				break;
			}
			ret = -EBUSY;
		}
	}
	mutex_unlock(&virtio_9p_lock);

	if (!found) {
		pr_err("no channels available for device %s\n", devname);
		return ret;
	}

	client->trans = (void *)chan;
	client->status = Connected;
	chan->client = client;

	return 0;
}
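
/*
 * Usage sketch (illustrative, not driver code): a guest typically reaches
 * this transport through a mount such as
 *
 *	mount -t 9p -o trans=virtio <mount_tag> /mnt/9p
 *
 * where <mount_tag> is the tag the device advertised (see mount_tag above);
 * p9_virtio_create() then matches that tag against the registered channels.
 */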

/**
 * p9_virtio_remove - clean up resources associated with a virtio device
 * @vdev: virtio device to remove
 *
 */

static void p9_virtio_remove(struct virtio_device *vdev)
{
	struct virtio_chan *chan = vdev->priv;
	unsigned long warning_time;

	mutex_lock(&virtio_9p_lock);

	/* Remove self from list so we don't get new users. */
	list_del(&chan->chan_list);
	warning_time = jiffies;

	/* Wait for existing users to close. */
	while (chan->inuse) {
		mutex_unlock(&virtio_9p_lock);
		msleep(250);
		if (time_after(jiffies, warning_time + 10 * HZ)) {
			dev_emerg(&vdev->dev,
				  "p9_virtio_remove: waiting for device in use.\n");
			warning_time = jiffies;
		}
		mutex_lock(&virtio_9p_lock);
	}

	mutex_unlock(&virtio_9p_lock);

	vdev->config->reset(vdev);
	vdev->config->del_vqs(vdev);

	sysfs_remove_file(&(vdev->dev.kobj), &dev_attr_mount_tag.attr);
	kobject_uevent(&(vdev->dev.kobj), KOBJ_CHANGE);
	kfree(chan->tag);
	kfree(chan->vc_wq);
	kfree(chan);

}

static struct virtio_device_id id_table[] = {
	{ VIRTIO_ID_9P, VIRTIO_DEV_ANY_ID },
	{ 0 },
};

static unsigned int features[] = {
	VIRTIO_9P_MOUNT_TAG,
};

/* The standard virtio driver structure: */
static struct virtio_driver p9_virtio_drv = {
	.feature_table = features,
	.feature_table_size = ARRAY_SIZE(features),
	.driver.name = KBUILD_MODNAME,
	.driver.owner = THIS_MODULE,
	.id_table = id_table,
	.probe = p9_virtio_probe,
	.remove = p9_virtio_remove,
};

static struct p9_trans_module p9_virtio_trans = {
	.name = "virtio",
	.create = p9_virtio_create,
	.close = p9_virtio_close,
	.request = p9_virtio_request,
	.zc_request = p9_virtio_zc_request,
	.cancel = p9_virtio_cancel,
	.cancelled = p9_virtio_cancelled,
	/*
	 * We leave one entry for input and one entry for response
	 * headers. We also skip one more entry to accommodate addresses
	 * that are not at a page boundary, which can result in an extra
	 * page in zero copy.
	 */
	.maxsize = PAGE_SIZE * (VIRTQUEUE_NUM - 3),
	.def = 1,
	.owner = THIS_MODULE,
};
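
/*
 * Worked example (assuming 4 KiB pages, for illustration): maxsize =
 * PAGE_SIZE * (VIRTQUEUE_NUM - 3) = 4096 * 125 = 512000 bytes, i.e. the
 * largest 9p message size this transport advertises after reserving the
 * three descriptor entries described in the comment above.
 */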

/* The standard init function */
static int __init p9_virtio_init(void)
{
	int rc;

	INIT_LIST_HEAD(&virtio_chan_list);

	v9fs_register_trans(&p9_virtio_trans);
	rc = register_virtio_driver(&p9_virtio_drv);
	if (rc)
		v9fs_unregister_trans(&p9_virtio_trans);

	return rc;
}

static void __exit p9_virtio_cleanup(void)
{
	unregister_virtio_driver(&p9_virtio_drv);
	v9fs_unregister_trans(&p9_virtio_trans);
}

module_init(p9_virtio_init);
module_exit(p9_virtio_cleanup);

MODULE_DEVICE_TABLE(virtio, id_table);
MODULE_AUTHOR("Eric Van Hensbergen <ericvh@gmail.com>");
MODULE_DESCRIPTION("Virtio 9p Transport");
MODULE_LICENSE("GPL");