Orange Pi5 kernel

Deprecated Linux kernel 5.10.110 for OrangePi 5/5B/5+ boards

3 Commits   0 Branches   0 Tags
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300    1) // SPDX-License-Identifier: GPL-2.0
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300    2) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300    3)  * virtio-fs: Virtio Filesystem
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300    4)  * Copyright (C) 2018 Red Hat, Inc.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300    5)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300    6) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300    7) #include <linux/fs.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300    8) #include <linux/dax.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300    9) #include <linux/pci.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   10) #include <linux/pfn_t.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   11) #include <linux/module.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   12) #include <linux/virtio.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   13) #include <linux/virtio_fs.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   14) #include <linux/delay.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   15) #include <linux/fs_context.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   16) #include <linux/fs_parser.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   17) #include <linux/highmem.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   18) #include <linux/uio.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   19) #include "fuse_i.h"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   20) 
/*
 * List of virtio-fs device instances and a lock for the list. The mutex also
 * provides mutual exclusion between the device-removal and mounting paths.
 */
static DEFINE_MUTEX(virtio_fs_mutex);
static LIST_HEAD(virtio_fs_instances);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   26) 
/*
 * Virtqueue indices: one high-priority queue (used for FUSE_FORGET, see
 * virtio_fs_hiprio_done_work()) followed by the request queue(s).
 */
enum {
	VQ_HIPRIO,
	VQ_REQUEST
};

/* Size of the per-virtqueue name buffer */
#define VQ_NAME_LEN	24
/* Per-virtqueue state */
struct virtio_fs_vq {
	spinlock_t lock;
	struct virtqueue *vq;     /* protected by ->lock */
	struct work_struct done_work;	/* processes completed buffers */
	struct list_head queued_reqs;	/* requests waiting for queue space */
	struct list_head end_reqs;	/* End these requests */
	struct delayed_work dispatch_work; /* retries queued/ended requests */
	struct fuse_dev *fud;		/* fuse device for this queue */
	bool connected;			/* false once the queue is shut down */
	long in_flight;			/* requests submitted to the device */
	struct completion in_flight_zero; /* No inflight requests */
	char name[VQ_NAME_LEN];
} ____cacheline_aligned_in_smp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   48) 
/* A virtio-fs device instance */
struct virtio_fs {
	struct kref refcount;		/* dropped via virtio_fs_put() */
	struct list_head list;    /* on virtio_fs_instances */
	char *tag;			/* identifier read from device config */
	struct virtio_fs_vq *vqs;	/* array of nvqs per-queue states */
	unsigned int nvqs;               /* number of virtqueues */
	unsigned int num_request_queues; /* number of request queues */
	struct dax_device *dax_dev;

	/* DAX memory window where file contents are mapped */
	void *window_kaddr;
	phys_addr_t window_phys_addr;
	size_t window_len;
};
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   64) 
/*
 * On-the-wire body of a FUSE_FORGET request; this struct itself is placed
 * in the scatterlist handed to the device (see send_forget_request()).
 */
struct virtio_fs_forget_req {
	struct fuse_in_header ih;
	struct fuse_forget_in arg;
};

struct virtio_fs_forget {
	/* This request can be temporarily queued on virt queue */
	struct list_head list;
	struct virtio_fs_forget_req req;
};
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   75) 
/*
 * Ties a fuse request to its queue for deferred completion via done_work.
 * NOTE(review): the work handler is not visible in this chunk — presumably
 * it ends the request outside the virtqueue completion path; confirm.
 */
struct virtio_fs_req_work {
	struct fuse_req *req;
	struct virtio_fs_vq *fsvq;
	struct work_struct done_work;
};
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   81) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   82) static int virtio_fs_enqueue_req(struct virtio_fs_vq *fsvq,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   83) 				 struct fuse_req *req, bool in_flight);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   84) 
/* Mount option tokens */
enum {
	OPT_DAX,
};

/* Table of mount parameters recognized by virtiofs */
static const struct fs_parameter_spec virtio_fs_parameters[] = {
	fsparam_flag("dax", OPT_DAX),
	{}
};
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   93) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   94) static int virtio_fs_parse_param(struct fs_context *fc,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   95) 				 struct fs_parameter *param)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   96) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   97) 	struct fs_parse_result result;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   98) 	struct fuse_fs_context *ctx = fc->fs_private;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   99) 	int opt;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  100) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  101) 	opt = fs_parse(fc, virtio_fs_parameters, param, &result);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  102) 	if (opt < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  103) 		return opt;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  104) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  105) 	switch (opt) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  106) 	case OPT_DAX:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  107) 		ctx->dax = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  108) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  109) 	default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  110) 		return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  111) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  112) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  113) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  114) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  115) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  116) static void virtio_fs_free_fc(struct fs_context *fc)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  117) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  118) 	struct fuse_fs_context *ctx = fc->fs_private;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  119) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  120) 	kfree(ctx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  121) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  122) 
/* Map a virtqueue back to its per-queue state in the owning device */
static inline struct virtio_fs_vq *vq_to_fsvq(struct virtqueue *vq)
{
	struct virtio_fs *fs = vq->vdev->priv;

	return &fs->vqs[vq->index];
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  129) 
/* Map a virtqueue to the fuse processing queue of its fuse device */
static inline struct fuse_pqueue *vq_to_fpq(struct virtqueue *vq)
{
	return &vq_to_fsvq(vq)->fud->pq;
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  134) 
/* Should be called with fsvq->lock held. */
static inline void inc_in_flight_req(struct virtio_fs_vq *fsvq)
{
	fsvq->in_flight++;
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  140) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  141) /* Should be called with fsvq->lock held. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  142) static inline void dec_in_flight_req(struct virtio_fs_vq *fsvq)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  143) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  144) 	WARN_ON(fsvq->in_flight <= 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  145) 	fsvq->in_flight--;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  146) 	if (!fsvq->in_flight)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  147) 		complete(&fsvq->in_flight_zero);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  148) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  149) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  150) static void release_virtio_fs_obj(struct kref *ref)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  151) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  152) 	struct virtio_fs *vfs = container_of(ref, struct virtio_fs, refcount);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  153) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  154) 	kfree(vfs->vqs);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  155) 	kfree(vfs);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  156) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  157) 
/* Drop a reference on @fs. Caller must hold virtio_fs_mutex. */
static void virtio_fs_put(struct virtio_fs *fs)
{
	kref_put(&fs->refcount, release_virtio_fs_obj);
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  163) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  164) static void virtio_fs_fiq_release(struct fuse_iqueue *fiq)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  165) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  166) 	struct virtio_fs *vfs = fiq->priv;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  167) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  168) 	mutex_lock(&virtio_fs_mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  169) 	virtio_fs_put(vfs);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  170) 	mutex_unlock(&virtio_fs_mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  171) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  172) 
/*
 * Wait for a queue's in-flight requests to complete, then flush its
 * completion and dispatch workers. Caller must hold virtio_fs_mutex so
 * that no other thread reinits or waits on in_flight_zero concurrently.
 */
static void virtio_fs_drain_queue(struct virtio_fs_vq *fsvq)
{
	WARN_ON(fsvq->in_flight < 0);

	/* Wait for in flight requests to finish.*/
	spin_lock(&fsvq->lock);
	if (fsvq->in_flight) {
		/* We are holding virtio_fs_mutex. There should not be any
		 * waiters waiting for completion.
		 */
		reinit_completion(&fsvq->in_flight_zero);
		spin_unlock(&fsvq->lock);
		wait_for_completion(&fsvq->in_flight_zero);
	} else {
		spin_unlock(&fsvq->lock);
	}

	/* Make sure the per-queue workers have finished before returning */
	flush_work(&fsvq->done_work);
	flush_delayed_work(&fsvq->dispatch_work);
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  193) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  194) static void virtio_fs_drain_all_queues_locked(struct virtio_fs *fs)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  195) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  196) 	struct virtio_fs_vq *fsvq;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  197) 	int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  198) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  199) 	for (i = 0; i < fs->nvqs; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  200) 		fsvq = &fs->vqs[i];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  201) 		virtio_fs_drain_queue(fsvq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  202) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  203) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  204) 
/* Drain all queues, serialized against other drainers by virtio_fs_mutex */
static void virtio_fs_drain_all_queues(struct virtio_fs *fs)
{
	/* Provides mutual exclusion between ->remove and ->kill_sb
	 * paths. We don't want both of these draining queue at the
	 * same time. Current completion logic reinits completion
	 * and that means there should not be any other thread
	 * doing reinit or waiting for completion already.
	 */
	mutex_lock(&virtio_fs_mutex);
	virtio_fs_drain_all_queues_locked(fs);
	mutex_unlock(&virtio_fs_mutex);
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  217) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  218) static void virtio_fs_start_all_queues(struct virtio_fs *fs)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  219) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  220) 	struct virtio_fs_vq *fsvq;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  221) 	int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  222) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  223) 	for (i = 0; i < fs->nvqs; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  224) 		fsvq = &fs->vqs[i];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  225) 		spin_lock(&fsvq->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  226) 		fsvq->connected = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  227) 		spin_unlock(&fsvq->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  228) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  229) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  230) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  231) /* Add a new instance to the list or return -EEXIST if tag name exists*/
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  232) static int virtio_fs_add_instance(struct virtio_fs *fs)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  233) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  234) 	struct virtio_fs *fs2;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  235) 	bool duplicate = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  236) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  237) 	mutex_lock(&virtio_fs_mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  238) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  239) 	list_for_each_entry(fs2, &virtio_fs_instances, list) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  240) 		if (strcmp(fs->tag, fs2->tag) == 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  241) 			duplicate = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  242) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  243) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  244) 	if (!duplicate)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  245) 		list_add_tail(&fs->list, &virtio_fs_instances);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  246) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  247) 	mutex_unlock(&virtio_fs_mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  248) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  249) 	if (duplicate)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  250) 		return -EEXIST;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  251) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  252) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  253) 
/*
 * Return the virtio_fs with a given tag, or NULL if none exists.
 * On success a reference is taken for the caller; drop it with
 * virtio_fs_put() (with virtio_fs_mutex held).
 */
static struct virtio_fs *virtio_fs_find_instance(const char *tag)
{
	struct virtio_fs *fs;

	mutex_lock(&virtio_fs_mutex);

	list_for_each_entry(fs, &virtio_fs_instances, list) {
		if (strcmp(fs->tag, tag) == 0) {
			kref_get(&fs->refcount);
			goto found;
		}
	}

	fs = NULL; /* not found */

found:
	mutex_unlock(&virtio_fs_mutex);

	return fs;
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  275) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  276) static void virtio_fs_free_devs(struct virtio_fs *fs)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  277) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  278) 	unsigned int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  279) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  280) 	for (i = 0; i < fs->nvqs; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  281) 		struct virtio_fs_vq *fsvq = &fs->vqs[i];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  282) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  283) 		if (!fsvq->fud)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  284) 			continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  285) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  286) 		fuse_dev_free(fsvq->fud);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  287) 		fsvq->fud = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  288) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  289) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  290) 
/*
 * Read the filesystem tag from virtio config space into fs->tag.
 * The tag is devm-allocated against the virtio device, so it is freed
 * automatically on device teardown — no explicit kfree() is needed.
 * Returns 0 on success, -EINVAL for an empty tag, -ENOMEM on allocation
 * failure.
 */
static int virtio_fs_read_tag(struct virtio_device *vdev, struct virtio_fs *fs)
{
	char tag_buf[sizeof_field(struct virtio_fs_config, tag)];
	char *end;
	size_t len;

	virtio_cread_bytes(vdev, offsetof(struct virtio_fs_config, tag),
			   &tag_buf, sizeof(tag_buf));
	end = memchr(tag_buf, '\0', sizeof(tag_buf));
	if (end == tag_buf)
		return -EINVAL; /* empty tag */
	/* No NUL found: the tag occupies the entire config field */
	if (!end)
		end = &tag_buf[sizeof(tag_buf)];

	len = end - tag_buf;
	fs->tag = devm_kmalloc(&vdev->dev, len + 1, GFP_KERNEL);
	if (!fs->tag)
		return -ENOMEM;
	memcpy(fs->tag, tag_buf, len);
	fs->tag[len] = '\0';
	return 0;
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  314) 
/* Work function for hiprio completion */
static void virtio_fs_hiprio_done_work(struct work_struct *work)
{
	struct virtio_fs_vq *fsvq = container_of(work, struct virtio_fs_vq,
						 done_work);
	struct virtqueue *vq = fsvq->vq;

	/* Free completed FUSE_FORGET requests */
	spin_lock(&fsvq->lock);
	do {
		unsigned int len;
		void *req;

		virtqueue_disable_cb(vq);

		while ((req = virtqueue_get_buf(vq, &len)) != NULL) {
			/* Buffer is the virtio_fs_forget allocated by sender */
			kfree(req);
			dec_in_flight_req(fsvq);
		}
		/* Loop again if a completion raced with re-enabling callbacks */
	} while (!virtqueue_enable_cb(vq) && likely(!virtqueue_is_broken(vq)));
	spin_unlock(&fsvq->lock);
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  337) 
/*
 * Dispatch worker for request queues: first ends requests placed on
 * ->end_reqs, then retries submission of requests parked on ->queued_reqs
 * (parked earlier because the virtqueue was full or memory was short).
 */
static void virtio_fs_request_dispatch_work(struct work_struct *work)
{
	struct fuse_req *req;
	struct virtio_fs_vq *fsvq = container_of(work, struct virtio_fs_vq,
						 dispatch_work.work);
	int ret;

	pr_debug("virtio-fs: worker %s called.\n", __func__);
	/* End requests queued on ->end_reqs */
	while (1) {
		spin_lock(&fsvq->lock);
		req = list_first_entry_or_null(&fsvq->end_reqs, struct fuse_req,
					       list);
		if (!req) {
			spin_unlock(&fsvq->lock);
			break;
		}

		list_del_init(&req->list);
		spin_unlock(&fsvq->lock);
		fuse_request_end(req);
	}

	/* Dispatch pending requests */
	while (1) {
		spin_lock(&fsvq->lock);
		req = list_first_entry_or_null(&fsvq->queued_reqs,
					       struct fuse_req, list);
		if (!req) {
			spin_unlock(&fsvq->lock);
			return;
		}
		list_del_init(&req->list);
		spin_unlock(&fsvq->lock);

		ret = virtio_fs_enqueue_req(fsvq, req, true);
		if (ret < 0) {
			if (ret == -ENOMEM || ret == -ENOSPC) {
				/* Transient: requeue and retry in ~1ms */
				spin_lock(&fsvq->lock);
				list_add_tail(&req->list, &fsvq->queued_reqs);
				schedule_delayed_work(&fsvq->dispatch_work,
						      msecs_to_jiffies(1));
				spin_unlock(&fsvq->lock);
				return;
			}
			/* Hard failure: report the error back through FUSE */
			req->out.h.error = ret;
			spin_lock(&fsvq->lock);
			dec_in_flight_req(fsvq);
			spin_unlock(&fsvq->lock);
			pr_err("virtio-fs: virtio_fs_enqueue_req() failed %d\n",
			       ret);
			fuse_request_end(req);
		}
	}
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  392) 
/*
 * Returns 1 if queue is full and sender should wait a bit before sending
 * next request, 0 otherwise.
 *
 * @in_flight: true when this forget has already been counted in
 *             fsvq->in_flight (i.e. it is a retry from the dispatch worker).
 * Ownership of @forget transfers here: it is handed to the device, requeued
 * for retry, or freed.
 */
static int send_forget_request(struct virtio_fs_vq *fsvq,
			       struct virtio_fs_forget *forget,
			       bool in_flight)
{
	struct scatterlist sg;
	struct virtqueue *vq;
	int ret = 0;
	bool notify;
	struct virtio_fs_forget_req *req = &forget->req;

	spin_lock(&fsvq->lock);
	if (!fsvq->connected) {
		/* Queue is being torn down: drop the forget silently */
		if (in_flight)
			dec_in_flight_req(fsvq);
		kfree(forget);
		goto out;
	}

	sg_init_one(&sg, req, sizeof(*req));
	vq = fsvq->vq;
	dev_dbg(&vq->vdev->dev, "%s\n", __func__);

	ret = virtqueue_add_outbuf(vq, &sg, 1, forget, GFP_ATOMIC);
	if (ret < 0) {
		if (ret == -ENOMEM || ret == -ENOSPC) {
			pr_debug("virtio-fs: Could not queue FORGET: err=%d. Will try later\n",
				 ret);
			list_add_tail(&forget->list, &fsvq->queued_reqs);
			schedule_delayed_work(&fsvq->dispatch_work,
					      msecs_to_jiffies(1));
			if (!in_flight)
				inc_in_flight_req(fsvq);
			/* Queue is full */
			ret = 1;
		} else {
			pr_debug("virtio-fs: Could not queue FORGET: err=%d. Dropping it.\n",
				 ret);
			kfree(forget);
			if (in_flight)
				dec_in_flight_req(fsvq);
		}
		goto out;
	}

	if (!in_flight)
		inc_in_flight_req(fsvq);
	notify = virtqueue_kick_prepare(vq);
	spin_unlock(&fsvq->lock);

	/* Notify the device outside the spinlock */
	if (notify)
		virtqueue_notify(vq);
	return ret;
out:
	spin_unlock(&fsvq->lock);
	return ret;
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  453) 
/*
 * Dispatch worker for the hiprio queue: retries sending FORGET requests
 * parked on ->queued_reqs until the list is empty or the virtqueue fills
 * up again (send_forget_request() reschedules this worker in that case).
 */
static void virtio_fs_hiprio_dispatch_work(struct work_struct *work)
{
	struct virtio_fs_forget *forget;
	struct virtio_fs_vq *fsvq = container_of(work, struct virtio_fs_vq,
						 dispatch_work.work);
	pr_debug("virtio-fs: worker %s called.\n", __func__);
	while (1) {
		spin_lock(&fsvq->lock);
		forget = list_first_entry_or_null(&fsvq->queued_reqs,
					struct virtio_fs_forget, list);
		if (!forget) {
			spin_unlock(&fsvq->lock);
			return;
		}

		list_del(&forget->list);
		spin_unlock(&fsvq->lock);
		/* Non-zero return: queue full again, worker was rescheduled */
		if (send_forget_request(fsvq, forget, true))
			return;
	}
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  475) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  476) /* Allocate and copy args into req->argbuf */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  477) static int copy_args_to_argbuf(struct fuse_req *req)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  478) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  479) 	struct fuse_args *args = req->args;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  480) 	unsigned int offset = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  481) 	unsigned int num_in;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  482) 	unsigned int num_out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  483) 	unsigned int len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  484) 	unsigned int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  485) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  486) 	num_in = args->in_numargs - args->in_pages;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  487) 	num_out = args->out_numargs - args->out_pages;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  488) 	len = fuse_len_args(num_in, (struct fuse_arg *) args->in_args) +
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  489) 	      fuse_len_args(num_out, args->out_args);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  490) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  491) 	req->argbuf = kmalloc(len, GFP_ATOMIC);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  492) 	if (!req->argbuf)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  493) 		return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  494) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  495) 	for (i = 0; i < num_in; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  496) 		memcpy(req->argbuf + offset,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  497) 		       args->in_args[i].value,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  498) 		       args->in_args[i].size);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  499) 		offset += args->in_args[i].size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  500) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  501) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  502) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  503) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  504) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  505) /* Copy args out of and free req->argbuf */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  506) static void copy_args_from_argbuf(struct fuse_args *args, struct fuse_req *req)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  507) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  508) 	unsigned int remaining;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  509) 	unsigned int offset;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  510) 	unsigned int num_in;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  511) 	unsigned int num_out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  512) 	unsigned int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  513) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  514) 	remaining = req->out.h.len - sizeof(req->out.h);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  515) 	num_in = args->in_numargs - args->in_pages;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  516) 	num_out = args->out_numargs - args->out_pages;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  517) 	offset = fuse_len_args(num_in, (struct fuse_arg *)args->in_args);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  518) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  519) 	for (i = 0; i < num_out; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  520) 		unsigned int argsize = args->out_args[i].size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  521) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  522) 		if (args->out_argvar &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  523) 		    i == args->out_numargs - 1 &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  524) 		    argsize > remaining) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  525) 			argsize = remaining;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  526) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  527) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  528) 		memcpy(args->out_args[i].value, req->argbuf + offset, argsize);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  529) 		offset += argsize;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  530) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  531) 		if (i != args->out_numargs - 1)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  532) 			remaining -= argsize;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  533) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  534) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  535) 	/* Store the actual size of the variable-length arg */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  536) 	if (args->out_argvar)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  537) 		args->out_args[args->out_numargs - 1].size = remaining;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  538) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  539) 	kfree(req->argbuf);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  540) 	req->argbuf = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  541) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  542) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  543) /* Work function for request completion */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  544) static void virtio_fs_request_complete(struct fuse_req *req,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  545) 				       struct virtio_fs_vq *fsvq)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  546) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  547) 	struct fuse_pqueue *fpq = &fsvq->fud->pq;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  548) 	struct fuse_args *args;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  549) 	struct fuse_args_pages *ap;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  550) 	unsigned int len, i, thislen;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  551) 	struct page *page;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  552) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  553) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  554) 	 * TODO verify that server properly follows FUSE protocol
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  555) 	 * (oh.uniq, oh.len)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  556) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  557) 	args = req->args;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  558) 	copy_args_from_argbuf(args, req);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  559) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  560) 	if (args->out_pages && args->page_zeroing) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  561) 		len = args->out_args[args->out_numargs - 1].size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  562) 		ap = container_of(args, typeof(*ap), args);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  563) 		for (i = 0; i < ap->num_pages; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  564) 			thislen = ap->descs[i].length;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  565) 			if (len < thislen) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  566) 				WARN_ON(ap->descs[i].offset);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  567) 				page = ap->pages[i];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  568) 				zero_user_segment(page, len, thislen);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  569) 				len = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  570) 			} else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  571) 				len -= thislen;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  572) 			}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  573) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  574) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  575) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  576) 	spin_lock(&fpq->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  577) 	clear_bit(FR_SENT, &req->flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  578) 	spin_unlock(&fpq->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  579) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  580) 	fuse_request_end(req);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  581) 	spin_lock(&fsvq->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  582) 	dec_in_flight_req(fsvq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  583) 	spin_unlock(&fsvq->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  584) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  585) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  586) static void virtio_fs_complete_req_work(struct work_struct *work)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  587) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  588) 	struct virtio_fs_req_work *w =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  589) 		container_of(work, typeof(*w), done_work);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  590) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  591) 	virtio_fs_request_complete(w->req, w->fsvq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  592) 	kfree(w);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  593) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  594) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  595) static void virtio_fs_requests_done_work(struct work_struct *work)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  596) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  597) 	struct virtio_fs_vq *fsvq = container_of(work, struct virtio_fs_vq,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  598) 						 done_work);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  599) 	struct fuse_pqueue *fpq = &fsvq->fud->pq;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  600) 	struct virtqueue *vq = fsvq->vq;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  601) 	struct fuse_req *req;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  602) 	struct fuse_req *next;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  603) 	unsigned int len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  604) 	LIST_HEAD(reqs);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  605) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  606) 	/* Collect completed requests off the virtqueue */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  607) 	spin_lock(&fsvq->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  608) 	do {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  609) 		virtqueue_disable_cb(vq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  610) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  611) 		while ((req = virtqueue_get_buf(vq, &len)) != NULL) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  612) 			spin_lock(&fpq->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  613) 			list_move_tail(&req->list, &reqs);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  614) 			spin_unlock(&fpq->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  615) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  616) 	} while (!virtqueue_enable_cb(vq) && likely(!virtqueue_is_broken(vq)));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  617) 	spin_unlock(&fsvq->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  618) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  619) 	/* End requests */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  620) 	list_for_each_entry_safe(req, next, &reqs, list) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  621) 		list_del_init(&req->list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  622) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  623) 		/* blocking async request completes in a worker context */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  624) 		if (req->args->may_block) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  625) 			struct virtio_fs_req_work *w;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  626) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  627) 			w = kzalloc(sizeof(*w), GFP_NOFS | __GFP_NOFAIL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  628) 			INIT_WORK(&w->done_work, virtio_fs_complete_req_work);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  629) 			w->fsvq = fsvq;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  630) 			w->req = req;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  631) 			schedule_work(&w->done_work);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  632) 		} else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  633) 			virtio_fs_request_complete(req, fsvq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  634) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  635) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  636) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  637) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  638) /* Virtqueue interrupt handler */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  639) static void virtio_fs_vq_done(struct virtqueue *vq)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  640) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  641) 	struct virtio_fs_vq *fsvq = vq_to_fsvq(vq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  642) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  643) 	dev_dbg(&vq->vdev->dev, "%s %s\n", __func__, fsvq->name);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  644) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  645) 	schedule_work(&fsvq->done_work);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  646) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  647) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  648) static void virtio_fs_init_vq(struct virtio_fs_vq *fsvq, char *name,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  649) 			      int vq_type)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  650) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  651) 	strncpy(fsvq->name, name, VQ_NAME_LEN);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  652) 	spin_lock_init(&fsvq->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  653) 	INIT_LIST_HEAD(&fsvq->queued_reqs);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  654) 	INIT_LIST_HEAD(&fsvq->end_reqs);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  655) 	init_completion(&fsvq->in_flight_zero);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  656) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  657) 	if (vq_type == VQ_REQUEST) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  658) 		INIT_WORK(&fsvq->done_work, virtio_fs_requests_done_work);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  659) 		INIT_DELAYED_WORK(&fsvq->dispatch_work,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  660) 				  virtio_fs_request_dispatch_work);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  661) 	} else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  662) 		INIT_WORK(&fsvq->done_work, virtio_fs_hiprio_done_work);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  663) 		INIT_DELAYED_WORK(&fsvq->dispatch_work,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  664) 				  virtio_fs_hiprio_dispatch_work);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  665) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  666) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  667) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  668) /* Initialize virtqueues */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  669) static int virtio_fs_setup_vqs(struct virtio_device *vdev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  670) 			       struct virtio_fs *fs)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  671) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  672) 	struct virtqueue **vqs;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  673) 	vq_callback_t **callbacks;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  674) 	const char **names;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  675) 	unsigned int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  676) 	int ret = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  677) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  678) 	virtio_cread_le(vdev, struct virtio_fs_config, num_request_queues,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  679) 			&fs->num_request_queues);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  680) 	if (fs->num_request_queues == 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  681) 		return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  682) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  683) 	fs->nvqs = VQ_REQUEST + fs->num_request_queues;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  684) 	fs->vqs = kcalloc(fs->nvqs, sizeof(fs->vqs[VQ_HIPRIO]), GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  685) 	if (!fs->vqs)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  686) 		return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  687) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  688) 	vqs = kmalloc_array(fs->nvqs, sizeof(vqs[VQ_HIPRIO]), GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  689) 	callbacks = kmalloc_array(fs->nvqs, sizeof(callbacks[VQ_HIPRIO]),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  690) 					GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  691) 	names = kmalloc_array(fs->nvqs, sizeof(names[VQ_HIPRIO]), GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  692) 	if (!vqs || !callbacks || !names) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  693) 		ret = -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  694) 		goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  695) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  696) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  697) 	/* Initialize the hiprio/forget request virtqueue */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  698) 	callbacks[VQ_HIPRIO] = virtio_fs_vq_done;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  699) 	virtio_fs_init_vq(&fs->vqs[VQ_HIPRIO], "hiprio", VQ_HIPRIO);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  700) 	names[VQ_HIPRIO] = fs->vqs[VQ_HIPRIO].name;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  701) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  702) 	/* Initialize the requests virtqueues */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  703) 	for (i = VQ_REQUEST; i < fs->nvqs; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  704) 		char vq_name[VQ_NAME_LEN];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  705) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  706) 		snprintf(vq_name, VQ_NAME_LEN, "requests.%u", i - VQ_REQUEST);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  707) 		virtio_fs_init_vq(&fs->vqs[i], vq_name, VQ_REQUEST);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  708) 		callbacks[i] = virtio_fs_vq_done;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  709) 		names[i] = fs->vqs[i].name;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  710) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  711) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  712) 	ret = virtio_find_vqs(vdev, fs->nvqs, vqs, callbacks, names, NULL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  713) 	if (ret < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  714) 		goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  715) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  716) 	for (i = 0; i < fs->nvqs; i++)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  717) 		fs->vqs[i].vq = vqs[i];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  718) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  719) 	virtio_fs_start_all_queues(fs);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  720) out:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  721) 	kfree(names);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  722) 	kfree(callbacks);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  723) 	kfree(vqs);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  724) 	if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  725) 		kfree(fs->vqs);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  726) 	return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  727) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  728) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  729) /* Free virtqueues (device must already be reset) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  730) static void virtio_fs_cleanup_vqs(struct virtio_device *vdev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  731) 				  struct virtio_fs *fs)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  732) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  733) 	vdev->config->del_vqs(vdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  734) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  735) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  736) /* Map a window offset to a page frame number.  The window offset will have
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  737)  * been produced by .iomap_begin(), which maps a file offset to a window
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  738)  * offset.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  739)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  740) static long virtio_fs_direct_access(struct dax_device *dax_dev, pgoff_t pgoff,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  741) 				    long nr_pages, void **kaddr, pfn_t *pfn)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  742) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  743) 	struct virtio_fs *fs = dax_get_private(dax_dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  744) 	phys_addr_t offset = PFN_PHYS(pgoff);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  745) 	size_t max_nr_pages = fs->window_len/PAGE_SIZE - pgoff;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  746) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  747) 	if (kaddr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  748) 		*kaddr = fs->window_kaddr + offset;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  749) 	if (pfn)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  750) 		*pfn = phys_to_pfn_t(fs->window_phys_addr + offset,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  751) 					PFN_DEV | PFN_MAP);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  752) 	return nr_pages > max_nr_pages ? max_nr_pages : nr_pages;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  753) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  754) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  755) static size_t virtio_fs_copy_from_iter(struct dax_device *dax_dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  756) 				       pgoff_t pgoff, void *addr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  757) 				       size_t bytes, struct iov_iter *i)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  758) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  759) 	return copy_from_iter(addr, bytes, i);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  760) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  761) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  762) static size_t virtio_fs_copy_to_iter(struct dax_device *dax_dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  763) 				       pgoff_t pgoff, void *addr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  764) 				       size_t bytes, struct iov_iter *i)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  765) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  766) 	return copy_to_iter(addr, bytes, i);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  767) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  768) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  769) static int virtio_fs_zero_page_range(struct dax_device *dax_dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  770) 				     pgoff_t pgoff, size_t nr_pages)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  771) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  772) 	long rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  773) 	void *kaddr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  774) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  775) 	rc = dax_direct_access(dax_dev, pgoff, nr_pages, &kaddr, NULL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  776) 	if (rc < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  777) 		return rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  778) 	memset(kaddr, 0, nr_pages << PAGE_SHIFT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  779) 	dax_flush(dax_dev, kaddr, nr_pages << PAGE_SHIFT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  780) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  781) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  782) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  783) static const struct dax_operations virtio_fs_dax_ops = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  784) 	.direct_access = virtio_fs_direct_access,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  785) 	.copy_from_iter = virtio_fs_copy_from_iter,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  786) 	.copy_to_iter = virtio_fs_copy_to_iter,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  787) 	.zero_page_range = virtio_fs_zero_page_range,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  788) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  789) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  790) static void virtio_fs_cleanup_dax(void *data)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  791) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  792) 	struct dax_device *dax_dev = data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  793) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  794) 	kill_dax(dax_dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  795) 	put_dax(dax_dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  796) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  797) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  798) static int virtio_fs_setup_dax(struct virtio_device *vdev, struct virtio_fs *fs)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  799) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  800) 	struct virtio_shm_region cache_reg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  801) 	struct dev_pagemap *pgmap;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  802) 	bool have_cache;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  803) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  804) 	if (!IS_ENABLED(CONFIG_FUSE_DAX))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  805) 		return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  806) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  807) 	/* Get cache region */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  808) 	have_cache = virtio_get_shm_region(vdev, &cache_reg,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  809) 					   (u8)VIRTIO_FS_SHMCAP_ID_CACHE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  810) 	if (!have_cache) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  811) 		dev_notice(&vdev->dev, "%s: No cache capability\n", __func__);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  812) 		return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  813) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  814) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  815) 	if (!devm_request_mem_region(&vdev->dev, cache_reg.addr, cache_reg.len,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  816) 				     dev_name(&vdev->dev))) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  817) 		dev_warn(&vdev->dev, "could not reserve region addr=0x%llx len=0x%llx\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  818) 			 cache_reg.addr, cache_reg.len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  819) 		return -EBUSY;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  820) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  821) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  822) 	dev_notice(&vdev->dev, "Cache len: 0x%llx @ 0x%llx\n", cache_reg.len,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  823) 		   cache_reg.addr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  824) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  825) 	pgmap = devm_kzalloc(&vdev->dev, sizeof(*pgmap), GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  826) 	if (!pgmap)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  827) 		return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  828) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  829) 	pgmap->type = MEMORY_DEVICE_FS_DAX;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  830) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  831) 	/* Ideally we would directly use the PCI BAR resource but
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  832) 	 * devm_memremap_pages() wants its own copy in pgmap.  So
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  833) 	 * initialize a struct resource from scratch (only the start
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  834) 	 * and end fields will be used).
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  835) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  836) 	pgmap->range = (struct range) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  837) 		.start = (phys_addr_t) cache_reg.addr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  838) 		.end = (phys_addr_t) cache_reg.addr + cache_reg.len - 1,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  839) 	};
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  840) 	pgmap->nr_range = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  841) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  842) 	fs->window_kaddr = devm_memremap_pages(&vdev->dev, pgmap);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  843) 	if (IS_ERR(fs->window_kaddr))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  844) 		return PTR_ERR(fs->window_kaddr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  845) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  846) 	fs->window_phys_addr = (phys_addr_t) cache_reg.addr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  847) 	fs->window_len = (phys_addr_t) cache_reg.len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  848) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  849) 	dev_dbg(&vdev->dev, "%s: window kaddr 0x%px phys_addr 0x%llx len 0x%llx\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  850) 		__func__, fs->window_kaddr, cache_reg.addr, cache_reg.len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  851) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  852) 	fs->dax_dev = alloc_dax(fs, NULL, &virtio_fs_dax_ops, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  853) 	if (IS_ERR(fs->dax_dev))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  854) 		return PTR_ERR(fs->dax_dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  855) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  856) 	return devm_add_action_or_reset(&vdev->dev, virtio_fs_cleanup_dax,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  857) 					fs->dax_dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  858) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  859) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  860) static int virtio_fs_probe(struct virtio_device *vdev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  861) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  862) 	struct virtio_fs *fs;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  863) 	int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  864) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  865) 	fs = kzalloc(sizeof(*fs), GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  866) 	if (!fs)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  867) 		return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  868) 	kref_init(&fs->refcount);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  869) 	vdev->priv = fs;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  870) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  871) 	ret = virtio_fs_read_tag(vdev, fs);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  872) 	if (ret < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  873) 		goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  874) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  875) 	ret = virtio_fs_setup_vqs(vdev, fs);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  876) 	if (ret < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  877) 		goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  878) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  879) 	/* TODO vq affinity */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  880) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  881) 	ret = virtio_fs_setup_dax(vdev, fs);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  882) 	if (ret < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  883) 		goto out_vqs;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  884) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  885) 	/* Bring the device online in case the filesystem is mounted and
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  886) 	 * requests need to be sent before we return.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  887) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  888) 	virtio_device_ready(vdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  889) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  890) 	ret = virtio_fs_add_instance(fs);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  891) 	if (ret < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  892) 		goto out_vqs;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  893) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  894) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  895) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  896) out_vqs:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  897) 	vdev->config->reset(vdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  898) 	virtio_fs_cleanup_vqs(vdev, fs);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  899) 	kfree(fs->vqs);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  900) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  901) out:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  902) 	vdev->priv = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  903) 	kfree(fs);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  904) 	return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  905) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  906) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  907) static void virtio_fs_stop_all_queues(struct virtio_fs *fs)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  908) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  909) 	struct virtio_fs_vq *fsvq;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  910) 	int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  911) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  912) 	for (i = 0; i < fs->nvqs; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  913) 		fsvq = &fs->vqs[i];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  914) 		spin_lock(&fsvq->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  915) 		fsvq->connected = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  916) 		spin_unlock(&fsvq->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  917) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  918) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  919) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  920) static void virtio_fs_remove(struct virtio_device *vdev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  921) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  922) 	struct virtio_fs *fs = vdev->priv;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  923) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  924) 	mutex_lock(&virtio_fs_mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  925) 	/* This device is going away. No one should get new reference */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  926) 	list_del_init(&fs->list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  927) 	virtio_fs_stop_all_queues(fs);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  928) 	virtio_fs_drain_all_queues_locked(fs);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  929) 	vdev->config->reset(vdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  930) 	virtio_fs_cleanup_vqs(vdev, fs);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  931) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  932) 	vdev->priv = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  933) 	/* Put device reference on virtio_fs object */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  934) 	virtio_fs_put(fs);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  935) 	mutex_unlock(&virtio_fs_mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  936) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  937) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  938) #ifdef CONFIG_PM_SLEEP
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  939) static int virtio_fs_freeze(struct virtio_device *vdev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  940) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  941) 	/* TODO need to save state here */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  942) 	pr_warn("virtio-fs: suspend/resume not yet supported\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  943) 	return -EOPNOTSUPP;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  944) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  945) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  946) static int virtio_fs_restore(struct virtio_device *vdev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  947) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  948) 	 /* TODO need to restore state here */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  949) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  950) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  951) #endif /* CONFIG_PM_SLEEP */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  952) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  953) static const struct virtio_device_id id_table[] = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  954) 	{ VIRTIO_ID_FS, VIRTIO_DEV_ANY_ID },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  955) 	{},
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  956) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  957) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  958) static const unsigned int feature_table[] = {};
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  959) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  960) static struct virtio_driver virtio_fs_driver = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  961) 	.driver.name		= KBUILD_MODNAME,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  962) 	.driver.owner		= THIS_MODULE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  963) 	.id_table		= id_table,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  964) 	.feature_table		= feature_table,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  965) 	.feature_table_size	= ARRAY_SIZE(feature_table),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  966) 	.probe			= virtio_fs_probe,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  967) 	.remove			= virtio_fs_remove,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  968) #ifdef CONFIG_PM_SLEEP
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  969) 	.freeze			= virtio_fs_freeze,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  970) 	.restore		= virtio_fs_restore,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  971) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  972) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  973) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  974) static void virtio_fs_wake_forget_and_unlock(struct fuse_iqueue *fiq)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  975) __releases(fiq->lock)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  976) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  977) 	struct fuse_forget_link *link;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  978) 	struct virtio_fs_forget *forget;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  979) 	struct virtio_fs_forget_req *req;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  980) 	struct virtio_fs *fs;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  981) 	struct virtio_fs_vq *fsvq;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  982) 	u64 unique;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  983) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  984) 	link = fuse_dequeue_forget(fiq, 1, NULL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  985) 	unique = fuse_get_unique(fiq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  986) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  987) 	fs = fiq->priv;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  988) 	fsvq = &fs->vqs[VQ_HIPRIO];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  989) 	spin_unlock(&fiq->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  990) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  991) 	/* Allocate a buffer for the request */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  992) 	forget = kmalloc(sizeof(*forget), GFP_NOFS | __GFP_NOFAIL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  993) 	req = &forget->req;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  994) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  995) 	req->ih = (struct fuse_in_header){
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  996) 		.opcode = FUSE_FORGET,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  997) 		.nodeid = link->forget_one.nodeid,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  998) 		.unique = unique,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  999) 		.len = sizeof(*req),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1000) 	};
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1001) 	req->arg = (struct fuse_forget_in){
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1002) 		.nlookup = link->forget_one.nlookup,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1003) 	};
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1004) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1005) 	send_forget_request(fsvq, forget, false);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1006) 	kfree(link);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1007) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1008) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1009) static void virtio_fs_wake_interrupt_and_unlock(struct fuse_iqueue *fiq)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1010) __releases(fiq->lock)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1011) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1012) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1013) 	 * TODO interrupts.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1014) 	 *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1015) 	 * Normal fs operations on a local filesystems aren't interruptible.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1016) 	 * Exceptions are blocking lock operations; for example fcntl(F_SETLKW)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1017) 	 * with shared lock between host and guest.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1018) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1019) 	spin_unlock(&fiq->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1020) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1021) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1022) /* Count number of scatter-gather elements required */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1023) static unsigned int sg_count_fuse_pages(struct fuse_page_desc *page_descs,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1024) 				       unsigned int num_pages,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1025) 				       unsigned int total_len)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1026) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1027) 	unsigned int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1028) 	unsigned int this_len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1029) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1030) 	for (i = 0; i < num_pages && total_len; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1031) 		this_len =  min(page_descs[i].length, total_len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1032) 		total_len -= this_len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1033) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1034) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1035) 	return i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1036) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1037) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1038) /* Return the number of scatter-gather list elements required */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1039) static unsigned int sg_count_fuse_req(struct fuse_req *req)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1040) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1041) 	struct fuse_args *args = req->args;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1042) 	struct fuse_args_pages *ap = container_of(args, typeof(*ap), args);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1043) 	unsigned int size, total_sgs = 1 /* fuse_in_header */;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1044) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1045) 	if (args->in_numargs - args->in_pages)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1046) 		total_sgs += 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1047) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1048) 	if (args->in_pages) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1049) 		size = args->in_args[args->in_numargs - 1].size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1050) 		total_sgs += sg_count_fuse_pages(ap->descs, ap->num_pages,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1051) 						 size);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1052) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1053) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1054) 	if (!test_bit(FR_ISREPLY, &req->flags))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1055) 		return total_sgs;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1056) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1057) 	total_sgs += 1 /* fuse_out_header */;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1058) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1059) 	if (args->out_numargs - args->out_pages)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1060) 		total_sgs += 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1061) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1062) 	if (args->out_pages) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1063) 		size = args->out_args[args->out_numargs - 1].size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1064) 		total_sgs += sg_count_fuse_pages(ap->descs, ap->num_pages,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1065) 						 size);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1066) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1067) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1068) 	return total_sgs;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1069) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1070) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1071) /* Add pages to scatter-gather list and return number of elements used */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1072) static unsigned int sg_init_fuse_pages(struct scatterlist *sg,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1073) 				       struct page **pages,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1074) 				       struct fuse_page_desc *page_descs,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1075) 				       unsigned int num_pages,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1076) 				       unsigned int total_len)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1077) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1078) 	unsigned int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1079) 	unsigned int this_len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1080) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1081) 	for (i = 0; i < num_pages && total_len; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1082) 		sg_init_table(&sg[i], 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1083) 		this_len =  min(page_descs[i].length, total_len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1084) 		sg_set_page(&sg[i], pages[i], this_len, page_descs[i].offset);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1085) 		total_len -= this_len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1086) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1087) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1088) 	return i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1089) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1090) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1091) /* Add args to scatter-gather list and return number of elements used */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1092) static unsigned int sg_init_fuse_args(struct scatterlist *sg,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1093) 				      struct fuse_req *req,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1094) 				      struct fuse_arg *args,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1095) 				      unsigned int numargs,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1096) 				      bool argpages,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1097) 				      void *argbuf,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1098) 				      unsigned int *len_used)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1099) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1100) 	struct fuse_args_pages *ap = container_of(req->args, typeof(*ap), args);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1101) 	unsigned int total_sgs = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1102) 	unsigned int len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1103) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1104) 	len = fuse_len_args(numargs - argpages, args);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1105) 	if (len)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1106) 		sg_init_one(&sg[total_sgs++], argbuf, len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1107) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1108) 	if (argpages)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1109) 		total_sgs += sg_init_fuse_pages(&sg[total_sgs],
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1110) 						ap->pages, ap->descs,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1111) 						ap->num_pages,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1112) 						args[numargs - 1].size);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1113) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1114) 	if (len_used)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1115) 		*len_used = len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1116) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1117) 	return total_sgs;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1118) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1119) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1120) /* Add a request to a virtqueue and kick the device */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1121) static int virtio_fs_enqueue_req(struct virtio_fs_vq *fsvq,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1122) 				 struct fuse_req *req, bool in_flight)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1123) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1124) 	/* requests need at least 4 elements */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1125) 	struct scatterlist *stack_sgs[6];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1126) 	struct scatterlist stack_sg[ARRAY_SIZE(stack_sgs)];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1127) 	struct scatterlist **sgs = stack_sgs;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1128) 	struct scatterlist *sg = stack_sg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1129) 	struct virtqueue *vq;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1130) 	struct fuse_args *args = req->args;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1131) 	unsigned int argbuf_used = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1132) 	unsigned int out_sgs = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1133) 	unsigned int in_sgs = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1134) 	unsigned int total_sgs;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1135) 	unsigned int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1136) 	int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1137) 	bool notify;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1138) 	struct fuse_pqueue *fpq;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1139) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1140) 	/* Does the sglist fit on the stack? */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1141) 	total_sgs = sg_count_fuse_req(req);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1142) 	if (total_sgs > ARRAY_SIZE(stack_sgs)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1143) 		sgs = kmalloc_array(total_sgs, sizeof(sgs[0]), GFP_ATOMIC);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1144) 		sg = kmalloc_array(total_sgs, sizeof(sg[0]), GFP_ATOMIC);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1145) 		if (!sgs || !sg) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1146) 			ret = -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1147) 			goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1148) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1149) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1150) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1151) 	/* Use a bounce buffer since stack args cannot be mapped */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1152) 	ret = copy_args_to_argbuf(req);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1153) 	if (ret < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1154) 		goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1155) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1156) 	/* Request elements */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1157) 	sg_init_one(&sg[out_sgs++], &req->in.h, sizeof(req->in.h));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1158) 	out_sgs += sg_init_fuse_args(&sg[out_sgs], req,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1159) 				     (struct fuse_arg *)args->in_args,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1160) 				     args->in_numargs, args->in_pages,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1161) 				     req->argbuf, &argbuf_used);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1162) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1163) 	/* Reply elements */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1164) 	if (test_bit(FR_ISREPLY, &req->flags)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1165) 		sg_init_one(&sg[out_sgs + in_sgs++],
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1166) 			    &req->out.h, sizeof(req->out.h));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1167) 		in_sgs += sg_init_fuse_args(&sg[out_sgs + in_sgs], req,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1168) 					    args->out_args, args->out_numargs,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1169) 					    args->out_pages,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1170) 					    req->argbuf + argbuf_used, NULL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1171) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1172) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1173) 	WARN_ON(out_sgs + in_sgs != total_sgs);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1174) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1175) 	for (i = 0; i < total_sgs; i++)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1176) 		sgs[i] = &sg[i];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1177) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1178) 	spin_lock(&fsvq->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1179) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1180) 	if (!fsvq->connected) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1181) 		spin_unlock(&fsvq->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1182) 		ret = -ENOTCONN;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1183) 		goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1184) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1185) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1186) 	vq = fsvq->vq;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1187) 	ret = virtqueue_add_sgs(vq, sgs, out_sgs, in_sgs, req, GFP_ATOMIC);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1188) 	if (ret < 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1189) 		spin_unlock(&fsvq->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1190) 		goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1191) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1192) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1193) 	/* Request successfully sent. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1194) 	fpq = &fsvq->fud->pq;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1195) 	spin_lock(&fpq->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1196) 	list_add_tail(&req->list, fpq->processing);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1197) 	spin_unlock(&fpq->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1198) 	set_bit(FR_SENT, &req->flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1199) 	/* matches barrier in request_wait_answer() */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1200) 	smp_mb__after_atomic();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1201) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1202) 	if (!in_flight)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1203) 		inc_in_flight_req(fsvq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1204) 	notify = virtqueue_kick_prepare(vq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1205) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1206) 	spin_unlock(&fsvq->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1207) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1208) 	if (notify)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1209) 		virtqueue_notify(vq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1210) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1211) out:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1212) 	if (ret < 0 && req->argbuf) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1213) 		kfree(req->argbuf);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1214) 		req->argbuf = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1215) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1216) 	if (sgs != stack_sgs) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1217) 		kfree(sgs);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1218) 		kfree(sg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1219) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1220) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1221) 	return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1222) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1223) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1224) static void virtio_fs_wake_pending_and_unlock(struct fuse_iqueue *fiq)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1225) __releases(fiq->lock)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1226) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1227) 	unsigned int queue_id = VQ_REQUEST; /* TODO multiqueue */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1228) 	struct virtio_fs *fs;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1229) 	struct fuse_req *req;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1230) 	struct virtio_fs_vq *fsvq;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1231) 	int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1232) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1233) 	WARN_ON(list_empty(&fiq->pending));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1234) 	req = list_last_entry(&fiq->pending, struct fuse_req, list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1235) 	clear_bit(FR_PENDING, &req->flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1236) 	list_del_init(&req->list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1237) 	WARN_ON(!list_empty(&fiq->pending));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1238) 	spin_unlock(&fiq->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1239) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1240) 	fs = fiq->priv;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1241) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1242) 	pr_debug("%s: opcode %u unique %#llx nodeid %#llx in.len %u out.len %u\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1243) 		  __func__, req->in.h.opcode, req->in.h.unique,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1244) 		 req->in.h.nodeid, req->in.h.len,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1245) 		 fuse_len_args(req->args->out_numargs, req->args->out_args));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1246) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1247) 	fsvq = &fs->vqs[queue_id];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1248) 	ret = virtio_fs_enqueue_req(fsvq, req, false);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1249) 	if (ret < 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1250) 		if (ret == -ENOMEM || ret == -ENOSPC) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1251) 			/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1252) 			 * Virtqueue full. Retry submission from worker
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1253) 			 * context as we might be holding fc->bg_lock.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1254) 			 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1255) 			spin_lock(&fsvq->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1256) 			list_add_tail(&req->list, &fsvq->queued_reqs);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1257) 			inc_in_flight_req(fsvq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1258) 			schedule_delayed_work(&fsvq->dispatch_work,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1259) 						msecs_to_jiffies(1));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1260) 			spin_unlock(&fsvq->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1261) 			return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1262) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1263) 		req->out.h.error = ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1264) 		pr_err("virtio-fs: virtio_fs_enqueue_req() failed %d\n", ret);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1265) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1266) 		/* Can't end request in submission context. Use a worker */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1267) 		spin_lock(&fsvq->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1268) 		list_add_tail(&req->list, &fsvq->end_reqs);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1269) 		schedule_delayed_work(&fsvq->dispatch_work, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1270) 		spin_unlock(&fsvq->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1271) 		return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1272) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1273) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1274) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1275) static const struct fuse_iqueue_ops virtio_fs_fiq_ops = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1276) 	.wake_forget_and_unlock		= virtio_fs_wake_forget_and_unlock,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1277) 	.wake_interrupt_and_unlock	= virtio_fs_wake_interrupt_and_unlock,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1278) 	.wake_pending_and_unlock	= virtio_fs_wake_pending_and_unlock,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1279) 	.release			= virtio_fs_fiq_release,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1280) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1281) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1282) static inline void virtio_fs_ctx_set_defaults(struct fuse_fs_context *ctx)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1283) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1284) 	ctx->rootmode = S_IFDIR;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1285) 	ctx->default_permissions = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1286) 	ctx->allow_other = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1287) 	ctx->max_read = UINT_MAX;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1288) 	ctx->blksize = 512;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1289) 	ctx->destroy = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1290) 	ctx->no_control = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1291) 	ctx->no_force_umount = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1292) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1293) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1294) static int virtio_fs_fill_super(struct super_block *sb, struct fs_context *fsc)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1295) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1296) 	struct fuse_mount *fm = get_fuse_mount_super(sb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1297) 	struct fuse_conn *fc = fm->fc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1298) 	struct virtio_fs *fs = fc->iq.priv;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1299) 	struct fuse_fs_context *ctx = fsc->fs_private;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1300) 	unsigned int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1301) 	int err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1302) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1303) 	virtio_fs_ctx_set_defaults(ctx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1304) 	mutex_lock(&virtio_fs_mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1305) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1306) 	/* After holding mutex, make sure virtiofs device is still there.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1307) 	 * Though we are holding a reference to it, drive ->remove might
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1308) 	 * still have cleaned up virtual queues. In that case bail out.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1309) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1310) 	err = -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1311) 	if (list_empty(&fs->list)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1312) 		pr_info("virtio-fs: tag <%s> not found\n", fs->tag);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1313) 		goto err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1314) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1315) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1316) 	err = -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1317) 	/* Allocate fuse_dev for hiprio and notification queues */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1318) 	for (i = 0; i < fs->nvqs; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1319) 		struct virtio_fs_vq *fsvq = &fs->vqs[i];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1320) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1321) 		fsvq->fud = fuse_dev_alloc();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1322) 		if (!fsvq->fud)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1323) 			goto err_free_fuse_devs;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1324) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1325) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1326) 	/* virtiofs allocates and installs its own fuse devices */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1327) 	ctx->fudptr = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1328) 	if (ctx->dax) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1329) 		if (!fs->dax_dev) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1330) 			err = -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1331) 			pr_err("virtio-fs: dax can't be enabled as filesystem"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1332) 			       " device does not support it.\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1333) 			goto err_free_fuse_devs;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1334) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1335) 		ctx->dax_dev = fs->dax_dev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1336) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1337) 	err = fuse_fill_super_common(sb, ctx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1338) 	if (err < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1339) 		goto err_free_fuse_devs;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1340) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1341) 	for (i = 0; i < fs->nvqs; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1342) 		struct virtio_fs_vq *fsvq = &fs->vqs[i];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1343) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1344) 		fuse_dev_install(fsvq->fud, fc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1345) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1346) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1347) 	/* Previous unmount will stop all queues. Start these again */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1348) 	virtio_fs_start_all_queues(fs);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1349) 	fuse_send_init(fm);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1350) 	mutex_unlock(&virtio_fs_mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1351) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1352) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1353) err_free_fuse_devs:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1354) 	virtio_fs_free_devs(fs);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1355) err:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1356) 	mutex_unlock(&virtio_fs_mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1357) 	return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1358) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1359) 
/*
 * Tear down the fuse connection backing this (last) mount.
 * Called from virtio_kill_sb() once fuse_mount_remove() reports the last
 * mount is gone. The ordering below is deliberate: drain in-flight
 * requests before fuse_conn_destroy() sends DESTROY, then stop and drain
 * once more before the fuse devices are freed.
 */
static void virtio_fs_conn_destroy(struct fuse_mount *fm)
{
	struct fuse_conn *fc = fm->fc;
	struct virtio_fs *vfs = fc->iq.priv;
	struct virtio_fs_vq *fsvq = &vfs->vqs[VQ_HIPRIO];

	/* Stop dax worker. Soon evict_inodes() will be called which
	 * will free all memory ranges belonging to all inodes.
	 */
	if (IS_ENABLED(CONFIG_FUSE_DAX))
		fuse_dax_cancel_work(fc);

	/* Stop forget queue. Soon destroy will be sent */
	spin_lock(&fsvq->lock);
	fsvq->connected = false;	/* new forgets on VQ_HIPRIO are refused from here on */
	spin_unlock(&fsvq->lock);
	virtio_fs_drain_all_queues(vfs);

	fuse_conn_destroy(fm);

	/* fuse_conn_destroy() must have sent destroy. Stop all queues
	 * and drain one more time and free fuse devices. Freeing fuse
	 * devices will drop their reference on fuse_conn and that in
	 * turn will drop its reference on virtio_fs object.
	 */
	virtio_fs_stop_all_queues(vfs);
	virtio_fs_drain_all_queues(vfs);
	virtio_fs_free_devs(vfs);
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1389) 
/*
 * ->kill_sb for virtiofs. Remove this mount from the fuse connection and,
 * if it was the last one, destroy the connection before killing the
 * anonymous superblock.
 */
static void virtio_kill_sb(struct super_block *sb)
{
	struct fuse_mount *fm = get_fuse_mount_super(sb);

	/* If mount failed, we can still be called without any fc */
	if (fm && fuse_mount_remove(fm))
		virtio_fs_conn_destroy(fm);

	kill_anon_super(sb);
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1403) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1404) static int virtio_fs_test_super(struct super_block *sb,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1405) 				struct fs_context *fsc)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1406) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1407) 	struct fuse_mount *fsc_fm = fsc->s_fs_info;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1408) 	struct fuse_mount *sb_fm = get_fuse_mount_super(sb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1409) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1410) 	return fsc_fm->fc->iq.priv == sb_fm->fc->iq.priv;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1411) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1412) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1413) static int virtio_fs_set_super(struct super_block *sb,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1414) 			       struct fs_context *fsc)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1415) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1416) 	int err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1417) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1418) 	err = get_anon_bdev(&sb->s_dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1419) 	if (!err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1420) 		fuse_mount_get(fsc->s_fs_info);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1421) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1422) 	return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1423) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1424) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1425) static int virtio_fs_get_tree(struct fs_context *fsc)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1426) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1427) 	struct virtio_fs *fs;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1428) 	struct super_block *sb;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1429) 	struct fuse_conn *fc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1430) 	struct fuse_mount *fm;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1431) 	int err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1432) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1433) 	/* This gets a reference on virtio_fs object. This ptr gets installed
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1434) 	 * in fc->iq->priv. Once fuse_conn is going away, it calls ->put()
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1435) 	 * to drop the reference to this object.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1436) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1437) 	fs = virtio_fs_find_instance(fsc->source);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1438) 	if (!fs) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1439) 		pr_info("virtio-fs: tag <%s> not found\n", fsc->source);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1440) 		return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1441) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1442) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1443) 	fc = kzalloc(sizeof(struct fuse_conn), GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1444) 	if (!fc) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1445) 		mutex_lock(&virtio_fs_mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1446) 		virtio_fs_put(fs);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1447) 		mutex_unlock(&virtio_fs_mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1448) 		return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1449) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1450) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1451) 	fm = kzalloc(sizeof(struct fuse_mount), GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1452) 	if (!fm) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1453) 		mutex_lock(&virtio_fs_mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1454) 		virtio_fs_put(fs);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1455) 		mutex_unlock(&virtio_fs_mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1456) 		kfree(fc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1457) 		return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1458) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1459) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1460) 	fuse_conn_init(fc, fm, fsc->user_ns, &virtio_fs_fiq_ops, fs);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1461) 	fc->release = fuse_free_conn;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1462) 	fc->delete_stale = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1463) 	fc->auto_submounts = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1464) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1465) 	fsc->s_fs_info = fm;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1466) 	sb = sget_fc(fsc, virtio_fs_test_super, virtio_fs_set_super);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1467) 	fuse_mount_put(fm);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1468) 	if (IS_ERR(sb))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1469) 		return PTR_ERR(sb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1470) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1471) 	if (!sb->s_root) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1472) 		err = virtio_fs_fill_super(sb, fsc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1473) 		if (err) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1474) 			fuse_mount_put(fm);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1475) 			sb->s_fs_info = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1476) 			deactivate_locked_super(sb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1477) 			return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1478) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1479) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1480) 		sb->s_flags |= SB_ACTIVE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1481) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1482) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1483) 	WARN_ON(fsc->root);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1484) 	fsc->root = dget(sb->s_root);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1485) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1486) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1487) 
/* fs_context operations for "virtiofs" mounts; tree creation is handled by
 * virtio_fs_get_tree().
 */
static const struct fs_context_operations virtio_fs_context_ops = {
	.free		= virtio_fs_free_fc,
	.parse_param	= virtio_fs_parse_param,
	.get_tree	= virtio_fs_get_tree,
};
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1493) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1494) static int virtio_fs_init_fs_context(struct fs_context *fsc)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1495) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1496) 	struct fuse_fs_context *ctx;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1497) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1498) 	ctx = kzalloc(sizeof(struct fuse_fs_context), GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1499) 	if (!ctx)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1500) 		return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1501) 	fsc->fs_private = ctx;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1502) 	fsc->ops = &virtio_fs_context_ops;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1503) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1504) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1505) 
/* The "virtiofs" filesystem type; superblocks are anonymous (no bdev). */
static struct file_system_type virtio_fs_type = {
	.owner		= THIS_MODULE,
	.name		= "virtiofs",
	.init_fs_context = virtio_fs_init_fs_context,
	.kill_sb	= virtio_kill_sb,
};
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1512) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1513) static int __init virtio_fs_init(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1514) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1515) 	int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1516) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1517) 	ret = register_virtio_driver(&virtio_fs_driver);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1518) 	if (ret < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1519) 		return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1520) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1521) 	ret = register_filesystem(&virtio_fs_type);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1522) 	if (ret < 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1523) 		unregister_virtio_driver(&virtio_fs_driver);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1524) 		return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1525) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1526) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1527) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1528) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1529) module_init(virtio_fs_init);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1530) 
/* Module exit: unregister in reverse order of virtio_fs_init(). */
static void __exit virtio_fs_exit(void)
{
	unregister_filesystem(&virtio_fs_type);
	unregister_virtio_driver(&virtio_fs_driver);
}
module_exit(virtio_fs_exit);

MODULE_AUTHOR("Stefan Hajnoczi <stefanha@redhat.com>");
MODULE_DESCRIPTION("Virtio Filesystem");
MODULE_LICENSE("GPL");
/* Allow auto-loading by filesystem name ("virtiofs") and by virtio ID */
MODULE_ALIAS_FS(KBUILD_MODNAME);
MODULE_DEVICE_TABLE(virtio, id_table);