Orange Pi5 kernel

Deprecated Linux kernel 5.10.110 sources for the Orange Pi 5/5B/5 Plus single-board computers

3 Commits   0 Branches   0 Tags
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300    1) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300    2)   FUSE: Filesystem in Userspace
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300    3)   Copyright (C) 2001-2008  Miklos Szeredi <miklos@szeredi.hu>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300    4) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300    5)   This program can be distributed under the terms of the GNU GPL.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300    6)   See the file COPYING.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300    7) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300    8) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300    9) #include "fuse_i.h"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   10) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   11) #include <linux/init.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   12) #include <linux/module.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   13) #include <linux/poll.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   14) #include <linux/sched/signal.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   15) #include <linux/uio.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   16) #include <linux/miscdevice.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   17) #include <linux/namei.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   18) #include <linux/pagemap.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   19) #include <linux/file.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   20) #include <linux/slab.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   21) #include <linux/pipe_fs_i.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   22) #include <linux/swap.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   23) #include <linux/splice.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   24) #include <linux/sched.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   25) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   26) MODULE_ALIAS_MISCDEV(FUSE_MINOR);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   27) MODULE_ALIAS("devname:fuse");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   28) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   29) /* Ordinary requests have even IDs, while interrupts IDs are odd */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   30) #define FUSE_INT_REQ_BIT (1ULL << 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   31) #define FUSE_REQ_ID_STEP (1ULL << 1)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   32) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   33) static struct kmem_cache *fuse_req_cachep;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   34) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   35) static struct fuse_dev *fuse_get_dev(struct file *file)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   36) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   37) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   38) 	 * Lockless access is OK, because file->private data is set
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   39) 	 * once during mount and is valid until the file is released.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   40) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   41) 	return READ_ONCE(file->private_data);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   42) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   43) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   44) static void fuse_request_init(struct fuse_mount *fm, struct fuse_req *req)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   45) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   46) 	INIT_LIST_HEAD(&req->list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   47) 	INIT_LIST_HEAD(&req->intr_entry);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   48) 	init_waitqueue_head(&req->waitq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   49) 	refcount_set(&req->count, 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   50) 	__set_bit(FR_PENDING, &req->flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   51) 	req->fm = fm;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   52) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   53) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   54) static struct fuse_req *fuse_request_alloc(struct fuse_mount *fm, gfp_t flags)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   55) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   56) 	struct fuse_req *req = kmem_cache_zalloc(fuse_req_cachep, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   57) 	if (req)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   58) 		fuse_request_init(fm, req);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   59) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   60) 	return req;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   61) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   62) 
/* Return a request to the slab cache; caller holds the last reference. */
static void fuse_request_free(struct fuse_req *req)
{
	kmem_cache_free(fuse_req_cachep, req);
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   67) 
/* Grab an extra reference on @req; paired with fuse_put_request(). */
static void __fuse_get_request(struct fuse_req *req)
{
	refcount_inc(&req->count);
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   72) 
/*
 * Drop a reference without the teardown done by fuse_put_request().
 * Must be called with > 1 refcount, i.e. only while another reference
 * is known to keep the request alive.
 */
static void __fuse_put_request(struct fuse_req *req)
{
	refcount_dec(&req->count);
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   78) 
/*
 * Mark the connection as initialized.  Readers pair the store below
 * with the smp_rmb() in fuse_get_req().
 */
void fuse_set_initialized(struct fuse_conn *fc)
{
	/* Make sure stores before this are seen on another CPU */
	smp_wmb();
	fc->initialized = 1;
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   85) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   86) static bool fuse_block_alloc(struct fuse_conn *fc, bool for_background)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   87) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   88) 	return !fc->initialized || (for_background && fc->blocked);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   89) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   90) 
/* Drop one fc->num_waiting reference taken in fuse_get_req(). */
static void fuse_drop_waiting(struct fuse_conn *fc)
{
	/*
	 * lockless check of fc->connected is okay, because atomic_dec_and_test()
	 * provides a memory barrier matched with the one in fuse_wait_aborted()
	 * to ensure no wake-up is missed.
	 */
	if (atomic_dec_and_test(&fc->num_waiting) &&
	    !READ_ONCE(fc->connected)) {
		/* wake up aborters */
		wake_up_all(&fc->blocked_waitq);
	}
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  104) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  105) static void fuse_put_request(struct fuse_req *req);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  106) 
/*
 * Allocate and set up a request for sending to userspace.
 *
 * Takes an fc->num_waiting reference for the lifetime of the request
 * (dropped via the FR_WAITING flag in fuse_put_request(), or directly
 * on the error path here).  May sleep until the connection is
 * initialized and, for background requests, until fc->blocked clears.
 *
 * Returns the request or an ERR_PTR():
 *   -EINTR        fatal signal received while waiting
 *   -ENOTCONN     connection not (or no longer) established
 *   -ECONNREFUSED userspace set conn_error
 *   -ENOMEM       request allocation failed
 *   -EOVERFLOW    current creds have no mapping in fc->user_ns
 */
static struct fuse_req *fuse_get_req(struct fuse_mount *fm, bool for_background)
{
	struct fuse_conn *fc = fm->fc;
	struct fuse_req *req;
	int err;
	atomic_inc(&fc->num_waiting);

	if (fuse_block_alloc(fc, for_background)) {
		err = -EINTR;
		if (wait_event_killable_exclusive(fc->blocked_waitq,
				!fuse_block_alloc(fc, for_background)))
			goto out;
	}
	/* Matches smp_wmb() in fuse_set_initialized() */
	smp_rmb();

	err = -ENOTCONN;
	if (!fc->connected)
		goto out;

	err = -ECONNREFUSED;
	if (fc->conn_error)
		goto out;

	req = fuse_request_alloc(fm, GFP_KERNEL);
	err = -ENOMEM;
	if (!req) {
		/* a blocked background waiter may proceed in our place */
		if (for_background)
			wake_up(&fc->blocked_waitq);
		goto out;
	}

	/* Stamp the request with creds translated into the conn's namespaces */
	req->in.h.uid = from_kuid(fc->user_ns, current_fsuid());
	req->in.h.gid = from_kgid(fc->user_ns, current_fsgid());
	req->in.h.pid = pid_nr_ns(task_pid(current), fc->pid_ns);

	__set_bit(FR_WAITING, &req->flags);
	if (for_background)
		__set_bit(FR_BACKGROUND, &req->flags);

	/* from_kuid()/from_kgid() return -1 when the id has no mapping */
	if (unlikely(req->in.h.uid == ((uid_t)-1) ||
		     req->in.h.gid == ((gid_t)-1))) {
		fuse_put_request(req);
		return ERR_PTR(-EOVERFLOW);
	}
	return req;

 out:
	fuse_drop_waiting(fc);
	return ERR_PTR(err);
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  158) 
/*
 * Drop a reference on @req; on the final reference, undo the
 * accounting set up in fuse_get_req() and free the request.
 */
static void fuse_put_request(struct fuse_req *req)
{
	struct fuse_conn *fc = req->fm->fc;

	if (refcount_dec_and_test(&req->count)) {
		if (test_bit(FR_BACKGROUND, &req->flags)) {
			/*
			 * We get here in the unlikely case that a background
			 * request was allocated but not sent
			 */
			spin_lock(&fc->bg_lock);
			if (!fc->blocked)
				wake_up(&fc->blocked_waitq);
			spin_unlock(&fc->bg_lock);
		}

		/* Release the fc->num_waiting reference from fuse_get_req() */
		if (test_bit(FR_WAITING, &req->flags)) {
			__clear_bit(FR_WAITING, &req->flags);
			fuse_drop_waiting(fc);
		}

		fuse_request_free(req);
	}
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  183) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  184) unsigned int fuse_len_args(unsigned int numargs, struct fuse_arg *args)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  185) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  186) 	unsigned nbytes = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  187) 	unsigned i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  188) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  189) 	for (i = 0; i < numargs; i++)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  190) 		nbytes += args[i].size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  191) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  192) 	return nbytes;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  193) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  194) EXPORT_SYMBOL_GPL(fuse_len_args);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  195) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  196) u64 fuse_get_unique(struct fuse_iqueue *fiq)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  197) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  198) 	fiq->reqctr += FUSE_REQ_ID_STEP;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  199) 	return fiq->reqctr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  200) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  201) EXPORT_SYMBOL_GPL(fuse_get_unique);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  202) 
/*
 * Hash a request ID into a processing-queue bucket.  FUSE_INT_REQ_BIT
 * is masked off so an interrupt's (odd) ID maps to the same bucket as
 * the (even) request it interrupts.
 */
static unsigned int fuse_req_hash(u64 unique)
{
	return hash_long(unique & ~FUSE_INT_REQ_BIT, FUSE_PQ_HASH_BITS);
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  207) 
/**
 * A new request is available, wake fiq->waitq
 *
 * Wakes any reader sleeping on fiq->waitq and signals FASYNC
 * subscribers, then drops fiq->lock (caller must hold it on entry).
 */
static void fuse_dev_wake_and_unlock(struct fuse_iqueue *fiq)
__releases(fiq->lock)
{
	wake_up(&fiq->waitq);
	kill_fasync(&fiq->fasync, SIGIO, POLL_IN);
	spin_unlock(&fiq->lock);
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  218) 
/*
 * Input-queue callbacks for the /dev/fuse transport: every event type
 * is signalled the same way — wake the device reader and unlock.
 */
const struct fuse_iqueue_ops fuse_dev_fiq_ops = {
	.wake_forget_and_unlock		= fuse_dev_wake_and_unlock,
	.wake_interrupt_and_unlock	= fuse_dev_wake_and_unlock,
	.wake_pending_and_unlock	= fuse_dev_wake_and_unlock,
};
EXPORT_SYMBOL_GPL(fuse_dev_fiq_ops);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  225) 
/*
 * Finalize the input header length, append @req to the pending list
 * and notify the queue reader.  Called with fiq->lock held; the wake
 * callback drops the lock.
 */
static void queue_request_and_unlock(struct fuse_iqueue *fiq,
				     struct fuse_req *req)
__releases(fiq->lock)
{
	req->in.h.len = sizeof(struct fuse_in_header) +
		fuse_len_args(req->args->in_numargs,
			      (struct fuse_arg *) req->args->in_args);
	list_add_tail(&req->list, &fiq->pending);
	fiq->ops->wake_pending_and_unlock(fiq);
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  236) 
/*
 * Queue a FORGET for @nodeid, dropping @nlookup lookup references.
 * Ownership of @forget passes to this function: it is either appended
 * to the forget list or freed if the queue is already disconnected.
 */
void fuse_queue_forget(struct fuse_conn *fc, struct fuse_forget_link *forget,
		       u64 nodeid, u64 nlookup)
{
	struct fuse_iqueue *fiq = &fc->iq;

	forget->forget_one.nodeid = nodeid;
	forget->forget_one.nlookup = nlookup;

	spin_lock(&fiq->lock);
	if (fiq->connected) {
		fiq->forget_list_tail->next = forget;
		fiq->forget_list_tail = forget;
		/* wake callback drops fiq->lock */
		fiq->ops->wake_forget_and_unlock(fiq);
	} else {
		kfree(forget);
		spin_unlock(&fiq->lock);
	}
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  255) 
/*
 * Move queued background requests onto the input queue until
 * fc->active_background reaches fc->max_background.  Called with
 * fc->bg_lock held (see fuse_request_end()).
 */
static void flush_bg_queue(struct fuse_conn *fc)
{
	struct fuse_iqueue *fiq = &fc->iq;

	while (fc->active_background < fc->max_background &&
	       !list_empty(&fc->bg_queue)) {
		struct fuse_req *req;

		req = list_first_entry(&fc->bg_queue, struct fuse_req, list);
		list_del(&req->list);
		fc->active_background++;
		spin_lock(&fiq->lock);
		req->in.h.unique = fuse_get_unique(fiq);
		/* drops fiq->lock */
		queue_request_and_unlock(fiq, req);
	}
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  272) 
/*
 * This function is called when a request is finished.  Either a reply
 * has arrived or it was aborted (and not yet sent) or some error
 * occurred during communication with userspace, or the device file
 * was closed.  The requester thread is woken up (if still waiting),
 * the 'end' callback is called if given, else the reference to the
 * request is released
 */
void fuse_request_end(struct fuse_req *req)
{
	struct fuse_mount *fm = req->fm;
	struct fuse_conn *fc = fm->fc;
	struct fuse_iqueue *fiq = &fc->iq;

	/* Only the first finisher runs the teardown; racers just drop a ref */
	if (test_and_set_bit(FR_FINISHED, &req->flags))
		goto put_request;

	/*
	 * test_and_set_bit() implies smp_mb() between bit
	 * changing and below FR_INTERRUPTED check. Pairs with
	 * smp_mb() from queue_interrupt().
	 */
	if (test_bit(FR_INTERRUPTED, &req->flags)) {
		/* remove any queued but unsent INTERRUPT for this request */
		spin_lock(&fiq->lock);
		list_del_init(&req->intr_entry);
		spin_unlock(&fiq->lock);
	}
	WARN_ON(test_bit(FR_PENDING, &req->flags));
	WARN_ON(test_bit(FR_SENT, &req->flags));
	if (test_bit(FR_BACKGROUND, &req->flags)) {
		spin_lock(&fc->bg_lock);
		clear_bit(FR_BACKGROUND, &req->flags);
		/* crossing max_background downward: unblock allocators */
		if (fc->num_background == fc->max_background) {
			fc->blocked = 0;
			wake_up(&fc->blocked_waitq);
		} else if (!fc->blocked) {
			/*
			 * Wake up next waiter, if any.  It's okay to use
			 * waitqueue_active(), as we've already synced up
			 * fc->blocked with waiters with the wake_up() call
			 * above.
			 */
			if (waitqueue_active(&fc->blocked_waitq))
				wake_up(&fc->blocked_waitq);
		}

		/* crossing the congestion threshold downward: uncongest bdi */
		if (fc->num_background == fc->congestion_threshold && fm->sb) {
			clear_bdi_congested(fm->sb->s_bdi, BLK_RW_SYNC);
			clear_bdi_congested(fm->sb->s_bdi, BLK_RW_ASYNC);
		}
		fc->num_background--;
		fc->active_background--;
		flush_bg_queue(fc);
		spin_unlock(&fc->bg_lock);
	} else {
		/* Wake up waiter sleeping in request_wait_answer() */
		wake_up(&req->waitq);
	}

	/* async requests report completion via the args->end callback */
	if (test_bit(FR_ASYNC, &req->flags))
		req->args->end(fm, req->args, req->out.h.error);
put_request:
	fuse_put_request(req);
}
EXPORT_SYMBOL_GPL(fuse_request_end);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  338) 
/*
 * Queue an INTERRUPT for a request userspace is already processing.
 *
 * Returns -EINVAL if the request was never marked FR_INTERRUPTED
 * (see request_wait_answer()).  Queueing is a no-op if an interrupt
 * is already pending or the request has meanwhile finished.
 */
static int queue_interrupt(struct fuse_req *req)
{
	struct fuse_iqueue *fiq = &req->fm->fc->iq;

	spin_lock(&fiq->lock);
	/* Bail out if FR_INTERRUPTED was never set on this request */
	if (unlikely(!test_bit(FR_INTERRUPTED, &req->flags))) {
		spin_unlock(&fiq->lock);
		return -EINVAL;
	}

	/* non-empty intr_entry means an interrupt is already queued */
	if (list_empty(&req->intr_entry)) {
		list_add_tail(&req->intr_entry, &fiq->interrupts);
		/*
		 * Pairs with smp_mb() implied by test_and_set_bit()
		 * from fuse_request_end().
		 */
		smp_mb();
		if (test_bit(FR_FINISHED, &req->flags)) {
			/* too late: request already completed */
			list_del_init(&req->intr_entry);
			spin_unlock(&fiq->lock);
			return 0;
		}
		/* wake callback drops fiq->lock */
		fiq->ops->wake_interrupt_and_unlock(fiq);
	} else {
		spin_unlock(&fiq->lock);
	}
	return 0;
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  368) 
/*
 * Wait until userspace answers the request (FR_FINISHED).
 *
 * Three phases:
 *  1. interruptible wait (unless fc->no_interrupt): any signal breaks
 *     out, the request is marked FR_INTERRUPTED and, if userspace has
 *     already read it (FR_SENT), an INTERRUPT is queued;
 *  2. killable wait (unless FR_FORCE): a fatal signal aborts a request
 *     that is still pending (not yet read) with -EINTR;
 *  3. uninterruptible wait for requests already in userspace or forced.
 */
static void request_wait_answer(struct fuse_req *req)
{
	struct fuse_conn *fc = req->fm->fc;
	struct fuse_iqueue *fiq = &fc->iq;
	int err;

	if (!fc->no_interrupt) {
		/* Any signal may interrupt this */
		err = wait_event_interruptible(req->waitq,
					test_bit(FR_FINISHED, &req->flags));
		if (!err)
			return;

		set_bit(FR_INTERRUPTED, &req->flags);
		/* matches barrier in fuse_dev_do_read() */
		smp_mb__after_atomic();
		if (test_bit(FR_SENT, &req->flags))
			queue_interrupt(req);
	}

	if (!test_bit(FR_FORCE, &req->flags)) {
		/* Only fatal signals may interrupt this */
		err = wait_event_killable(req->waitq,
					test_bit(FR_FINISHED, &req->flags));
		if (!err)
			return;

		spin_lock(&fiq->lock);
		/* Request is not yet in userspace, bail out */
		if (test_bit(FR_PENDING, &req->flags)) {
			list_del(&req->list);
			spin_unlock(&fiq->lock);
			/* drop the reference taken in __fuse_request_send() */
			__fuse_put_request(req);
			req->out.h.error = -EINTR;
			return;
		}
		spin_unlock(&fiq->lock);
	}

	/*
	 * Either request is already in userspace, or it was forced.
	 * Wait it out.
	 */
	wait_event(req->waitq, test_bit(FR_FINISHED, &req->flags));
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  414) 
/*
 * Queue a synchronous foreground request and wait for its answer.
 * On a dead connection the request is failed locally with -ENOTCONN
 * in req->out.h.error.  Background requests must not come here.
 */
static void __fuse_request_send(struct fuse_req *req)
{
	struct fuse_iqueue *fiq = &req->fm->fc->iq;

	BUG_ON(test_bit(FR_BACKGROUND, &req->flags));
	spin_lock(&fiq->lock);
	if (!fiq->connected) {
		spin_unlock(&fiq->lock);
		req->out.h.error = -ENOTCONN;
	} else {
		req->in.h.unique = fuse_get_unique(fiq);
		/* acquire extra reference, since request is still needed
		   after fuse_request_end() */
		__fuse_get_request(req);
		/* drops fiq->lock */
		queue_request_and_unlock(fiq, req);

		request_wait_answer(req);
		/* Pairs with smp_wmb() in fuse_request_end() */
		smp_rmb();
	}
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  436) 
/*
 * Shrink argument sizes for servers speaking an older protocol minor:
 *  < 4:  FUSE_STATFS reply used the compat size
 *  < 9:  entry (LOOKUP/CREATE/...) and attr replies used compat sizes
 *  < 12: FUSE_CREATE/FUSE_MKNOD request args used the older layouts
 */
static void fuse_adjust_compat(struct fuse_conn *fc, struct fuse_args *args)
{
	if (fc->minor < 4 && args->opcode == FUSE_STATFS)
		args->out_args[0].size = FUSE_COMPAT_STATFS_SIZE;

	if (fc->minor < 9) {
		switch (args->opcode) {
		case FUSE_LOOKUP:
		case FUSE_CREATE:
		case FUSE_MKNOD:
		case FUSE_MKDIR:
		case FUSE_SYMLINK:
		case FUSE_LINK:
			args->out_args[0].size = FUSE_COMPAT_ENTRY_OUT_SIZE;
			break;
		case FUSE_GETATTR:
		case FUSE_SETATTR:
			args->out_args[0].size = FUSE_COMPAT_ATTR_OUT_SIZE;
			break;
		}
	}
	if (fc->minor < 12) {
		switch (args->opcode) {
		case FUSE_CREATE:
			args->in_args[0].size = sizeof(struct fuse_open_in);
			break;
		case FUSE_MKNOD:
			args->in_args[0].size = FUSE_COMPAT_MKNOD_IN_SIZE;
			break;
		}
	}
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  469) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  470) static void fuse_force_creds(struct fuse_req *req)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  471) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  472) 	struct fuse_conn *fc = req->fm->fc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  473) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  474) 	req->in.h.uid = from_kuid_munged(fc->user_ns, current_fsuid());
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  475) 	req->in.h.gid = from_kgid_munged(fc->user_ns, current_fsgid());
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  476) 	req->in.h.pid = pid_nr_ns(task_pid(current), fc->pid_ns);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  477) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  478) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  479) static void fuse_args_to_req(struct fuse_req *req, struct fuse_args *args)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  480) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  481) 	req->in.h.opcode = args->opcode;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  482) 	req->in.h.nodeid = args->nodeid;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  483) 	req->args = args;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  484) 	if (args->end)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  485) 		__set_bit(FR_ASYNC, &req->flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  486) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  487) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  488) ssize_t fuse_simple_request(struct fuse_mount *fm, struct fuse_args *args)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  489) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  490) 	struct fuse_conn *fc = fm->fc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  491) 	struct fuse_req *req;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  492) 	ssize_t ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  493) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  494) 	if (args->force) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  495) 		atomic_inc(&fc->num_waiting);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  496) 		req = fuse_request_alloc(fm, GFP_KERNEL | __GFP_NOFAIL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  497) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  498) 		if (!args->nocreds)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  499) 			fuse_force_creds(req);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  500) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  501) 		__set_bit(FR_WAITING, &req->flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  502) 		__set_bit(FR_FORCE, &req->flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  503) 	} else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  504) 		WARN_ON(args->nocreds);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  505) 		req = fuse_get_req(fm, false);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  506) 		if (IS_ERR(req))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  507) 			return PTR_ERR(req);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  508) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  509) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  510) 	/* Needs to be done after fuse_get_req() so that fc->minor is valid */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  511) 	fuse_adjust_compat(fc, args);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  512) 	fuse_args_to_req(req, args);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  513) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  514) 	if (!args->noreply)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  515) 		__set_bit(FR_ISREPLY, &req->flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  516) 	__fuse_request_send(req);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  517) 	ret = req->out.h.error;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  518) 	if (!ret && args->out_argvar) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  519) 		BUG_ON(args->out_numargs == 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  520) 		ret = args->out_args[args->out_numargs - 1].size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  521) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  522) 	fuse_put_request(req);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  523) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  524) 	return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  525) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  526) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  527) static bool fuse_request_queue_background(struct fuse_req *req)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  528) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  529) 	struct fuse_mount *fm = req->fm;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  530) 	struct fuse_conn *fc = fm->fc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  531) 	bool queued = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  532) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  533) 	WARN_ON(!test_bit(FR_BACKGROUND, &req->flags));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  534) 	if (!test_bit(FR_WAITING, &req->flags)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  535) 		__set_bit(FR_WAITING, &req->flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  536) 		atomic_inc(&fc->num_waiting);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  537) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  538) 	__set_bit(FR_ISREPLY, &req->flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  539) 	spin_lock(&fc->bg_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  540) 	if (likely(fc->connected)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  541) 		fc->num_background++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  542) 		if (fc->num_background == fc->max_background)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  543) 			fc->blocked = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  544) 		if (fc->num_background == fc->congestion_threshold && fm->sb) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  545) 			set_bdi_congested(fm->sb->s_bdi, BLK_RW_SYNC);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  546) 			set_bdi_congested(fm->sb->s_bdi, BLK_RW_ASYNC);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  547) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  548) 		list_add_tail(&req->list, &fc->bg_queue);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  549) 		flush_bg_queue(fc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  550) 		queued = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  551) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  552) 	spin_unlock(&fc->bg_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  553) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  554) 	return queued;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  555) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  556) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  557) int fuse_simple_background(struct fuse_mount *fm, struct fuse_args *args,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  558) 			    gfp_t gfp_flags)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  559) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  560) 	struct fuse_req *req;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  561) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  562) 	if (args->force) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  563) 		WARN_ON(!args->nocreds);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  564) 		req = fuse_request_alloc(fm, gfp_flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  565) 		if (!req)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  566) 			return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  567) 		__set_bit(FR_BACKGROUND, &req->flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  568) 	} else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  569) 		WARN_ON(args->nocreds);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  570) 		req = fuse_get_req(fm, true);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  571) 		if (IS_ERR(req))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  572) 			return PTR_ERR(req);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  573) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  574) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  575) 	fuse_args_to_req(req, args);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  576) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  577) 	if (!fuse_request_queue_background(req)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  578) 		fuse_put_request(req);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  579) 		return -ENOTCONN;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  580) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  581) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  582) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  583) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  584) EXPORT_SYMBOL_GPL(fuse_simple_background);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  585) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  586) static int fuse_simple_notify_reply(struct fuse_mount *fm,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  587) 				    struct fuse_args *args, u64 unique)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  588) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  589) 	struct fuse_req *req;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  590) 	struct fuse_iqueue *fiq = &fm->fc->iq;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  591) 	int err = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  592) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  593) 	req = fuse_get_req(fm, false);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  594) 	if (IS_ERR(req))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  595) 		return PTR_ERR(req);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  596) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  597) 	__clear_bit(FR_ISREPLY, &req->flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  598) 	req->in.h.unique = unique;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  599) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  600) 	fuse_args_to_req(req, args);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  601) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  602) 	spin_lock(&fiq->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  603) 	if (fiq->connected) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  604) 		queue_request_and_unlock(fiq, req);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  605) 	} else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  606) 		err = -ENODEV;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  607) 		spin_unlock(&fiq->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  608) 		fuse_put_request(req);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  609) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  610) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  611) 	return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  612) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  613) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  614) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  615)  * Lock the request.  Up to the next unlock_request() there mustn't be
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  616)  * anything that could cause a page-fault.  If the request was already
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  617)  * aborted bail out.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  618)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  619) static int lock_request(struct fuse_req *req)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  620) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  621) 	int err = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  622) 	if (req) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  623) 		spin_lock(&req->waitq.lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  624) 		if (test_bit(FR_ABORTED, &req->flags))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  625) 			err = -ENOENT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  626) 		else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  627) 			set_bit(FR_LOCKED, &req->flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  628) 		spin_unlock(&req->waitq.lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  629) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  630) 	return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  631) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  632) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  633) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  634)  * Unlock request.  If it was aborted while locked, caller is responsible
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  635)  * for unlocking and ending the request.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  636)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  637) static int unlock_request(struct fuse_req *req)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  638) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  639) 	int err = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  640) 	if (req) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  641) 		spin_lock(&req->waitq.lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  642) 		if (test_bit(FR_ABORTED, &req->flags))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  643) 			err = -ENOENT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  644) 		else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  645) 			clear_bit(FR_LOCKED, &req->flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  646) 		spin_unlock(&req->waitq.lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  647) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  648) 	return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  649) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  650) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  651) struct fuse_copy_state {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  652) 	int write;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  653) 	struct fuse_req *req;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  654) 	struct iov_iter *iter;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  655) 	struct pipe_buffer *pipebufs;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  656) 	struct pipe_buffer *currbuf;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  657) 	struct pipe_inode_info *pipe;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  658) 	unsigned long nr_segs;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  659) 	struct page *pg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  660) 	unsigned len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  661) 	unsigned offset;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  662) 	unsigned move_pages:1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  663) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  664) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  665) static void fuse_copy_init(struct fuse_copy_state *cs, int write,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  666) 			   struct iov_iter *iter)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  667) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  668) 	memset(cs, 0, sizeof(*cs));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  669) 	cs->write = write;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  670) 	cs->iter = iter;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  671) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  672) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  673) /* Unmap and put previous page of userspace buffer */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  674) static void fuse_copy_finish(struct fuse_copy_state *cs)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  675) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  676) 	if (cs->currbuf) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  677) 		struct pipe_buffer *buf = cs->currbuf;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  678) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  679) 		if (cs->write)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  680) 			buf->len = PAGE_SIZE - cs->len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  681) 		cs->currbuf = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  682) 	} else if (cs->pg) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  683) 		if (cs->write) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  684) 			flush_dcache_page(cs->pg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  685) 			set_page_dirty_lock(cs->pg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  686) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  687) 		/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  688) 		 * The page could be GUP page(see iov_iter_get_pages in
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  689) 		 * fuse_copy_fill) so use put_user_page to release it.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  690) 		 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  691) 		put_user_page(cs->pg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  692) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  693) 	cs->pg = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  694) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  695) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  696) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  697)  * Get another pagefull of userspace buffer, and map it to kernel
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  698)  * address space, and lock request
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  699)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  700) static int fuse_copy_fill(struct fuse_copy_state *cs)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  701) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  702) 	struct page *page;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  703) 	int err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  704) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  705) 	err = unlock_request(cs->req);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  706) 	if (err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  707) 		return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  708) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  709) 	fuse_copy_finish(cs);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  710) 	if (cs->pipebufs) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  711) 		struct pipe_buffer *buf = cs->pipebufs;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  712) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  713) 		if (!cs->write) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  714) 			err = pipe_buf_confirm(cs->pipe, buf);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  715) 			if (err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  716) 				return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  717) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  718) 			BUG_ON(!cs->nr_segs);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  719) 			cs->currbuf = buf;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  720) 			cs->pg = buf->page;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  721) 			cs->offset = buf->offset;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  722) 			cs->len = buf->len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  723) 			cs->pipebufs++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  724) 			cs->nr_segs--;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  725) 		} else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  726) 			if (cs->nr_segs >= cs->pipe->max_usage)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  727) 				return -EIO;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  728) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  729) 			page = alloc_page(GFP_HIGHUSER);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  730) 			if (!page)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  731) 				return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  732) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  733) 			buf->page = page;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  734) 			buf->offset = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  735) 			buf->len = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  736) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  737) 			cs->currbuf = buf;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  738) 			cs->pg = page;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  739) 			cs->offset = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  740) 			cs->len = PAGE_SIZE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  741) 			cs->pipebufs++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  742) 			cs->nr_segs++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  743) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  744) 	} else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  745) 		size_t off;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  746) 		err = iov_iter_get_pages(cs->iter, &page, PAGE_SIZE, 1, &off);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  747) 		if (err < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  748) 			return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  749) 		BUG_ON(!err);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  750) 		cs->len = err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  751) 		cs->offset = off;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  752) 		cs->pg = page;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  753) 		iov_iter_advance(cs->iter, err);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  754) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  755) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  756) 	return lock_request(cs->req);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  757) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  758) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  759) /* Do as much copy to/from userspace buffer as we can */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  760) static int fuse_copy_do(struct fuse_copy_state *cs, void **val, unsigned *size)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  761) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  762) 	unsigned ncpy = min(*size, cs->len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  763) 	if (val) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  764) 		void *pgaddr = kmap_atomic(cs->pg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  765) 		void *buf = pgaddr + cs->offset;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  766) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  767) 		if (cs->write)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  768) 			memcpy(buf, *val, ncpy);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  769) 		else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  770) 			memcpy(*val, buf, ncpy);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  771) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  772) 		kunmap_atomic(pgaddr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  773) 		*val += ncpy;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  774) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  775) 	*size -= ncpy;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  776) 	cs->len -= ncpy;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  777) 	cs->offset += ncpy;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  778) 	return ncpy;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  779) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  780) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  781) static int fuse_check_page(struct page *page)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  782) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  783) 	if (page_mapcount(page) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  784) 	    page->mapping != NULL ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  785) 	    (page->flags & PAGE_FLAGS_CHECK_AT_PREP &
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  786) 	     ~(1 << PG_locked |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  787) 	       1 << PG_referenced |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  788) 	       1 << PG_uptodate |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  789) 	       1 << PG_lru |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  790) 	       1 << PG_active |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  791) 	       1 << PG_workingset |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  792) 	       1 << PG_reclaim |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  793) 	       1 << PG_waiters))) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  794) 		dump_page(page, "fuse: trying to steal weird page");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  795) 		return 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  796) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  797) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  798) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  799) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  800) static int fuse_try_move_page(struct fuse_copy_state *cs, struct page **pagep)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  801) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  802) 	int err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  803) 	struct page *oldpage = *pagep;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  804) 	struct page *newpage;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  805) 	struct pipe_buffer *buf = cs->pipebufs;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  806) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  807) 	get_page(oldpage);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  808) 	err = unlock_request(cs->req);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  809) 	if (err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  810) 		goto out_put_old;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  811) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  812) 	fuse_copy_finish(cs);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  813) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  814) 	err = pipe_buf_confirm(cs->pipe, buf);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  815) 	if (err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  816) 		goto out_put_old;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  817) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  818) 	BUG_ON(!cs->nr_segs);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  819) 	cs->currbuf = buf;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  820) 	cs->len = buf->len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  821) 	cs->pipebufs++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  822) 	cs->nr_segs--;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  823) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  824) 	if (cs->len != PAGE_SIZE)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  825) 		goto out_fallback;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  826) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  827) 	if (!pipe_buf_try_steal(cs->pipe, buf))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  828) 		goto out_fallback;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  829) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  830) 	newpage = buf->page;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  831) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  832) 	if (!PageUptodate(newpage))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  833) 		SetPageUptodate(newpage);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  834) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  835) 	ClearPageMappedToDisk(newpage);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  836) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  837) 	if (fuse_check_page(newpage) != 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  838) 		goto out_fallback_unlock;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  839) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  840) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  841) 	 * This is a new and locked page, it shouldn't be mapped or
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  842) 	 * have any special flags on it
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  843) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  844) 	if (WARN_ON(page_mapped(oldpage)))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  845) 		goto out_fallback_unlock;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  846) 	if (WARN_ON(page_has_private(oldpage)))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  847) 		goto out_fallback_unlock;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  848) 	if (WARN_ON(PageDirty(oldpage) || PageWriteback(oldpage)))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  849) 		goto out_fallback_unlock;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  850) 	if (WARN_ON(PageMlocked(oldpage)))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  851) 		goto out_fallback_unlock;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  852) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  853) 	err = replace_page_cache_page(oldpage, newpage, GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  854) 	if (err) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  855) 		unlock_page(newpage);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  856) 		goto out_put_old;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  857) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  858) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  859) 	get_page(newpage);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  860) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  861) 	if (!(buf->flags & PIPE_BUF_FLAG_LRU))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  862) 		lru_cache_add(newpage);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  863) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  864) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  865) 	 * Release while we have extra ref on stolen page.  Otherwise
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  866) 	 * anon_pipe_buf_release() might think the page can be reused.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  867) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  868) 	pipe_buf_release(cs->pipe, buf);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  869) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  870) 	err = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  871) 	spin_lock(&cs->req->waitq.lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  872) 	if (test_bit(FR_ABORTED, &cs->req->flags))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  873) 		err = -ENOENT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  874) 	else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  875) 		*pagep = newpage;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  876) 	spin_unlock(&cs->req->waitq.lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  877) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  878) 	if (err) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  879) 		unlock_page(newpage);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  880) 		put_page(newpage);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  881) 		goto out_put_old;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  882) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  883) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  884) 	unlock_page(oldpage);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  885) 	/* Drop ref for ap->pages[] array */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  886) 	put_page(oldpage);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  887) 	cs->len = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  888) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  889) 	err = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  890) out_put_old:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  891) 	/* Drop ref obtained in this function */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  892) 	put_page(oldpage);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  893) 	return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  894) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  895) out_fallback_unlock:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  896) 	unlock_page(newpage);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  897) out_fallback:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  898) 	cs->pg = buf->page;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  899) 	cs->offset = buf->offset;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  900) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  901) 	err = lock_request(cs->req);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  902) 	if (!err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  903) 		err = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  904) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  905) 	goto out_put_old;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  906) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  907) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  908) static int fuse_ref_page(struct fuse_copy_state *cs, struct page *page,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  909) 			 unsigned offset, unsigned count)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  910) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  911) 	struct pipe_buffer *buf;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  912) 	int err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  913) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  914) 	if (cs->nr_segs >= cs->pipe->max_usage)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  915) 		return -EIO;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  916) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  917) 	get_page(page);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  918) 	err = unlock_request(cs->req);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  919) 	if (err) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  920) 		put_page(page);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  921) 		return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  922) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  923) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  924) 	fuse_copy_finish(cs);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  925) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  926) 	buf = cs->pipebufs;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  927) 	buf->page = page;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  928) 	buf->offset = offset;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  929) 	buf->len = count;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  930) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  931) 	cs->pipebufs++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  932) 	cs->nr_segs++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  933) 	cs->len = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  934) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  935) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  936) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  937) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  938) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  939)  * Copy a page in the request to/from the userspace buffer.  Must be
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  940)  * done atomically
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  941)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  942) static int fuse_copy_page(struct fuse_copy_state *cs, struct page **pagep,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  943) 			  unsigned offset, unsigned count, int zeroing)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  944) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  945) 	int err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  946) 	struct page *page = *pagep;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  947) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  948) 	if (page && zeroing && count < PAGE_SIZE)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  949) 		clear_highpage(page);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  950) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  951) 	while (count) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  952) 		if (cs->write && cs->pipebufs && page) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  953) 			/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  954) 			 * Can't control lifetime of pipe buffers, so always
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  955) 			 * copy user pages.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  956) 			 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  957) 			if (cs->req->args->user_pages) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  958) 				err = fuse_copy_fill(cs);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  959) 				if (err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  960) 					return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  961) 			} else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  962) 				return fuse_ref_page(cs, page, offset, count);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  963) 			}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  964) 		} else if (!cs->len) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  965) 			if (cs->move_pages && page &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  966) 			    offset == 0 && count == PAGE_SIZE) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  967) 				err = fuse_try_move_page(cs, pagep);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  968) 				if (err <= 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  969) 					return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  970) 			} else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  971) 				err = fuse_copy_fill(cs);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  972) 				if (err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  973) 					return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  974) 			}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  975) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  976) 		if (page) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  977) 			void *mapaddr = kmap_atomic(page);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  978) 			void *buf = mapaddr + offset;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  979) 			offset += fuse_copy_do(cs, &buf, &count);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  980) 			kunmap_atomic(mapaddr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  981) 		} else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  982) 			offset += fuse_copy_do(cs, NULL, &count);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  983) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  984) 	if (page && !cs->write)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  985) 		flush_dcache_page(page);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  986) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  987) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  988) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  989) /* Copy pages in the request to/from userspace buffer */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  990) static int fuse_copy_pages(struct fuse_copy_state *cs, unsigned nbytes,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  991) 			   int zeroing)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  992) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  993) 	unsigned i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  994) 	struct fuse_req *req = cs->req;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  995) 	struct fuse_args_pages *ap = container_of(req->args, typeof(*ap), args);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  996) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  997) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  998) 	for (i = 0; i < ap->num_pages && (nbytes || zeroing); i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  999) 		int err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1000) 		unsigned int offset = ap->descs[i].offset;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1001) 		unsigned int count = min(nbytes, ap->descs[i].length);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1002) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1003) 		err = fuse_copy_page(cs, &ap->pages[i], offset, count, zeroing);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1004) 		if (err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1005) 			return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1006) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1007) 		nbytes -= count;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1008) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1009) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1010) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1011) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1012) /* Copy a single argument in the request to/from userspace buffer */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1013) static int fuse_copy_one(struct fuse_copy_state *cs, void *val, unsigned size)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1014) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1015) 	while (size) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1016) 		if (!cs->len) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1017) 			int err = fuse_copy_fill(cs);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1018) 			if (err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1019) 				return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1020) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1021) 		fuse_copy_do(cs, &val, &size);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1022) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1023) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1024) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1025) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1026) /* Copy request arguments to/from userspace buffer */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1027) static int fuse_copy_args(struct fuse_copy_state *cs, unsigned numargs,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1028) 			  unsigned argpages, struct fuse_arg *args,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1029) 			  int zeroing)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1030) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1031) 	int err = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1032) 	unsigned i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1033) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1034) 	for (i = 0; !err && i < numargs; i++)  {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1035) 		struct fuse_arg *arg = &args[i];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1036) 		if (i == numargs - 1 && argpages)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1037) 			err = fuse_copy_pages(cs, arg->size, zeroing);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1038) 		else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1039) 			err = fuse_copy_one(cs, arg->value, arg->size);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1040) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1041) 	return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1042) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1043) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1044) static int forget_pending(struct fuse_iqueue *fiq)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1045) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1046) 	return fiq->forget_list_head.next != NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1047) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1048) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1049) static int request_pending(struct fuse_iqueue *fiq)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1050) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1051) 	return !list_empty(&fiq->pending) || !list_empty(&fiq->interrupts) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1052) 		forget_pending(fiq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1053) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1054) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1055) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1056)  * Transfer an interrupt request to userspace
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1057)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1058)  * Unlike other requests this is assembled on demand, without a need
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1059)  * to allocate a separate fuse_req structure.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1060)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1061)  * Called with fiq->lock held, releases it
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1062)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1063) static int fuse_read_interrupt(struct fuse_iqueue *fiq,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1064) 			       struct fuse_copy_state *cs,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1065) 			       size_t nbytes, struct fuse_req *req)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1066) __releases(fiq->lock)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1067) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1068) 	struct fuse_in_header ih;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1069) 	struct fuse_interrupt_in arg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1070) 	unsigned reqsize = sizeof(ih) + sizeof(arg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1071) 	int err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1072) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1073) 	list_del_init(&req->intr_entry);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1074) 	memset(&ih, 0, sizeof(ih));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1075) 	memset(&arg, 0, sizeof(arg));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1076) 	ih.len = reqsize;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1077) 	ih.opcode = FUSE_INTERRUPT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1078) 	ih.unique = (req->in.h.unique | FUSE_INT_REQ_BIT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1079) 	arg.unique = req->in.h.unique;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1080) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1081) 	spin_unlock(&fiq->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1082) 	if (nbytes < reqsize)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1083) 		return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1084) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1085) 	err = fuse_copy_one(cs, &ih, sizeof(ih));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1086) 	if (!err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1087) 		err = fuse_copy_one(cs, &arg, sizeof(arg));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1088) 	fuse_copy_finish(cs);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1089) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1090) 	return err ? err : reqsize;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1091) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1092) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1093) struct fuse_forget_link *fuse_dequeue_forget(struct fuse_iqueue *fiq,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1094) 					     unsigned int max,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1095) 					     unsigned int *countp)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1096) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1097) 	struct fuse_forget_link *head = fiq->forget_list_head.next;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1098) 	struct fuse_forget_link **newhead = &head;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1099) 	unsigned count;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1100) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1101) 	for (count = 0; *newhead != NULL && count < max; count++)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1102) 		newhead = &(*newhead)->next;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1103) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1104) 	fiq->forget_list_head.next = *newhead;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1105) 	*newhead = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1106) 	if (fiq->forget_list_head.next == NULL)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1107) 		fiq->forget_list_tail = &fiq->forget_list_head;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1108) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1109) 	if (countp != NULL)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1110) 		*countp = count;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1111) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1112) 	return head;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1113) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1114) EXPORT_SYMBOL(fuse_dequeue_forget);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1115) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1116) static int fuse_read_single_forget(struct fuse_iqueue *fiq,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1117) 				   struct fuse_copy_state *cs,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1118) 				   size_t nbytes)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1119) __releases(fiq->lock)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1120) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1121) 	int err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1122) 	struct fuse_forget_link *forget = fuse_dequeue_forget(fiq, 1, NULL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1123) 	struct fuse_forget_in arg = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1124) 		.nlookup = forget->forget_one.nlookup,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1125) 	};
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1126) 	struct fuse_in_header ih = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1127) 		.opcode = FUSE_FORGET,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1128) 		.nodeid = forget->forget_one.nodeid,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1129) 		.unique = fuse_get_unique(fiq),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1130) 		.len = sizeof(ih) + sizeof(arg),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1131) 	};
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1132) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1133) 	spin_unlock(&fiq->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1134) 	kfree(forget);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1135) 	if (nbytes < ih.len)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1136) 		return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1137) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1138) 	err = fuse_copy_one(cs, &ih, sizeof(ih));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1139) 	if (!err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1140) 		err = fuse_copy_one(cs, &arg, sizeof(arg));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1141) 	fuse_copy_finish(cs);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1142) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1143) 	if (err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1144) 		return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1145) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1146) 	return ih.len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1147) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1148) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1149) static int fuse_read_batch_forget(struct fuse_iqueue *fiq,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1150) 				   struct fuse_copy_state *cs, size_t nbytes)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1151) __releases(fiq->lock)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1152) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1153) 	int err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1154) 	unsigned max_forgets;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1155) 	unsigned count;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1156) 	struct fuse_forget_link *head;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1157) 	struct fuse_batch_forget_in arg = { .count = 0 };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1158) 	struct fuse_in_header ih = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1159) 		.opcode = FUSE_BATCH_FORGET,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1160) 		.unique = fuse_get_unique(fiq),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1161) 		.len = sizeof(ih) + sizeof(arg),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1162) 	};
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1163) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1164) 	if (nbytes < ih.len) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1165) 		spin_unlock(&fiq->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1166) 		return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1167) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1168) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1169) 	max_forgets = (nbytes - ih.len) / sizeof(struct fuse_forget_one);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1170) 	head = fuse_dequeue_forget(fiq, max_forgets, &count);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1171) 	spin_unlock(&fiq->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1172) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1173) 	arg.count = count;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1174) 	ih.len += count * sizeof(struct fuse_forget_one);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1175) 	err = fuse_copy_one(cs, &ih, sizeof(ih));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1176) 	if (!err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1177) 		err = fuse_copy_one(cs, &arg, sizeof(arg));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1178) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1179) 	while (head) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1180) 		struct fuse_forget_link *forget = head;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1181) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1182) 		if (!err) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1183) 			err = fuse_copy_one(cs, &forget->forget_one,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1184) 					    sizeof(forget->forget_one));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1185) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1186) 		head = forget->next;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1187) 		kfree(forget);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1188) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1189) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1190) 	fuse_copy_finish(cs);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1191) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1192) 	if (err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1193) 		return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1194) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1195) 	return ih.len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1196) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1197) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1198) static int fuse_read_forget(struct fuse_conn *fc, struct fuse_iqueue *fiq,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1199) 			    struct fuse_copy_state *cs,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1200) 			    size_t nbytes)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1201) __releases(fiq->lock)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1202) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1203) 	if (fc->minor < 16 || fiq->forget_list_head.next->next == NULL)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1204) 		return fuse_read_single_forget(fiq, cs, nbytes);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1205) 	else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1206) 		return fuse_read_batch_forget(fiq, cs, nbytes);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1207) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1208) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1209) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1210)  * Read a single request into the userspace filesystem's buffer.  This
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1211)  * function waits until a request is available, then removes it from
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1212)  * the pending list and copies request data to userspace buffer.  If
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1213)  * no reply is needed (FORGET) or request has been aborted or there
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1214)  * was an error during the copying then it's finished by calling
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1215)  * fuse_request_end().  Otherwise add it to the processing list, and set
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1216)  * the 'sent' flag.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1217)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1218) static ssize_t fuse_dev_do_read(struct fuse_dev *fud, struct file *file,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1219) 				struct fuse_copy_state *cs, size_t nbytes)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1220) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1221) 	ssize_t err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1222) 	struct fuse_conn *fc = fud->fc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1223) 	struct fuse_iqueue *fiq = &fc->iq;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1224) 	struct fuse_pqueue *fpq = &fud->pq;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1225) 	struct fuse_req *req;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1226) 	struct fuse_args *args;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1227) 	unsigned reqsize;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1228) 	unsigned int hash;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1229) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1230) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1231) 	 * Require sane minimum read buffer - that has capacity for fixed part
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1232) 	 * of any request header + negotiated max_write room for data.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1233) 	 *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1234) 	 * Historically libfuse reserves 4K for fixed header room, but e.g.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1235) 	 * GlusterFS reserves only 80 bytes
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1236) 	 *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1237) 	 *	= `sizeof(fuse_in_header) + sizeof(fuse_write_in)`
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1238) 	 *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1239) 	 * which is the absolute minimum any sane filesystem should be using
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1240) 	 * for header room.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1241) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1242) 	if (nbytes < max_t(size_t, FUSE_MIN_READ_BUFFER,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1243) 			   sizeof(struct fuse_in_header) +
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1244) 			   sizeof(struct fuse_write_in) +
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1245) 			   fc->max_write))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1246) 		return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1247) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1248)  restart:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1249) 	for (;;) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1250) 		spin_lock(&fiq->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1251) 		if (!fiq->connected || request_pending(fiq))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1252) 			break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1253) 		spin_unlock(&fiq->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1254) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1255) 		if (file->f_flags & O_NONBLOCK)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1256) 			return -EAGAIN;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1257) 		err = wait_event_interruptible_exclusive(fiq->waitq,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1258) 				!fiq->connected || request_pending(fiq));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1259) 		if (err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1260) 			return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1261) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1262) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1263) 	if (!fiq->connected) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1264) 		err = fc->aborted ? -ECONNABORTED : -ENODEV;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1265) 		goto err_unlock;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1266) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1267) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1268) 	if (!list_empty(&fiq->interrupts)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1269) 		req = list_entry(fiq->interrupts.next, struct fuse_req,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1270) 				 intr_entry);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1271) 		return fuse_read_interrupt(fiq, cs, nbytes, req);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1272) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1273) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1274) 	if (forget_pending(fiq)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1275) 		if (list_empty(&fiq->pending) || fiq->forget_batch-- > 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1276) 			return fuse_read_forget(fc, fiq, cs, nbytes);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1277) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1278) 		if (fiq->forget_batch <= -8)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1279) 			fiq->forget_batch = 16;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1280) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1281) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1282) 	req = list_entry(fiq->pending.next, struct fuse_req, list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1283) 	clear_bit(FR_PENDING, &req->flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1284) 	list_del_init(&req->list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1285) 	spin_unlock(&fiq->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1286) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1287) 	args = req->args;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1288) 	reqsize = req->in.h.len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1289) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1290) 	/* If request is too large, reply with an error and restart the read */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1291) 	if (nbytes < reqsize) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1292) 		req->out.h.error = -EIO;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1293) 		/* SETXATTR is special, since it may contain too large data */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1294) 		if (args->opcode == FUSE_SETXATTR)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1295) 			req->out.h.error = -E2BIG;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1296) 		fuse_request_end(req);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1297) 		goto restart;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1298) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1299) 	spin_lock(&fpq->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1300) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1301) 	 *  Must not put request on fpq->io queue after having been shut down by
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1302) 	 *  fuse_abort_conn()
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1303) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1304) 	if (!fpq->connected) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1305) 		req->out.h.error = err = -ECONNABORTED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1306) 		goto out_end;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1307) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1308) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1309) 	list_add(&req->list, &fpq->io);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1310) 	spin_unlock(&fpq->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1311) 	cs->req = req;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1312) 	err = fuse_copy_one(cs, &req->in.h, sizeof(req->in.h));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1313) 	if (!err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1314) 		err = fuse_copy_args(cs, args->in_numargs, args->in_pages,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1315) 				     (struct fuse_arg *) args->in_args, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1316) 	fuse_copy_finish(cs);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1317) 	spin_lock(&fpq->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1318) 	clear_bit(FR_LOCKED, &req->flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1319) 	if (!fpq->connected) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1320) 		err = fc->aborted ? -ECONNABORTED : -ENODEV;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1321) 		goto out_end;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1322) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1323) 	if (err) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1324) 		req->out.h.error = -EIO;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1325) 		goto out_end;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1326) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1327) 	if (!test_bit(FR_ISREPLY, &req->flags)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1328) 		err = reqsize;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1329) 		goto out_end;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1330) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1331) 	hash = fuse_req_hash(req->in.h.unique);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1332) 	list_move_tail(&req->list, &fpq->processing[hash]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1333) 	__fuse_get_request(req);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1334) 	set_bit(FR_SENT, &req->flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1335) 	spin_unlock(&fpq->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1336) 	/* matches barrier in request_wait_answer() */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1337) 	smp_mb__after_atomic();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1338) 	if (test_bit(FR_INTERRUPTED, &req->flags))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1339) 		queue_interrupt(req);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1340) 	fuse_put_request(req);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1341) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1342) 	return reqsize;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1343) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1344) out_end:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1345) 	if (!test_bit(FR_PRIVATE, &req->flags))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1346) 		list_del_init(&req->list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1347) 	spin_unlock(&fpq->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1348) 	fuse_request_end(req);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1349) 	return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1350) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1351)  err_unlock:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1352) 	spin_unlock(&fiq->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1353) 	return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1354) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1355) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1356) static int fuse_dev_open(struct inode *inode, struct file *file)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1357) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1358) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1359) 	 * The fuse device's file's private_data is used to hold
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1360) 	 * the fuse_conn(ection) when it is mounted, and is used to
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1361) 	 * keep track of whether the file has been mounted already.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1362) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1363) 	file->private_data = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1364) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1365) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1366) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1367) static ssize_t fuse_dev_read(struct kiocb *iocb, struct iov_iter *to)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1368) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1369) 	struct fuse_copy_state cs;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1370) 	struct file *file = iocb->ki_filp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1371) 	struct fuse_dev *fud = fuse_get_dev(file);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1372) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1373) 	if (!fud)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1374) 		return -EPERM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1375) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1376) 	if (!iter_is_iovec(to))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1377) 		return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1378) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1379) 	fuse_copy_init(&cs, 1, to);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1380) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1381) 	return fuse_dev_do_read(fud, file, &cs, iov_iter_count(to));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1382) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1383) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1384) static ssize_t fuse_dev_splice_read(struct file *in, loff_t *ppos,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1385) 				    struct pipe_inode_info *pipe,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1386) 				    size_t len, unsigned int flags)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1387) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1388) 	int total, ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1389) 	int page_nr = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1390) 	struct pipe_buffer *bufs;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1391) 	struct fuse_copy_state cs;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1392) 	struct fuse_dev *fud = fuse_get_dev(in);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1393) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1394) 	if (!fud)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1395) 		return -EPERM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1396) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1397) 	bufs = kvmalloc_array(pipe->max_usage, sizeof(struct pipe_buffer),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1398) 			      GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1399) 	if (!bufs)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1400) 		return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1401) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1402) 	fuse_copy_init(&cs, 1, NULL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1403) 	cs.pipebufs = bufs;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1404) 	cs.pipe = pipe;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1405) 	ret = fuse_dev_do_read(fud, in, &cs, len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1406) 	if (ret < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1407) 		goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1408) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1409) 	if (pipe_occupancy(pipe->head, pipe->tail) + cs.nr_segs > pipe->max_usage) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1410) 		ret = -EIO;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1411) 		goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1412) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1413) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1414) 	for (ret = total = 0; page_nr < cs.nr_segs; total += ret) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1415) 		/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1416) 		 * Need to be careful about this.  Having buf->ops in module
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1417) 		 * code can Oops if the buffer persists after module unload.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1418) 		 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1419) 		bufs[page_nr].ops = &nosteal_pipe_buf_ops;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1420) 		bufs[page_nr].flags = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1421) 		ret = add_to_pipe(pipe, &bufs[page_nr++]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1422) 		if (unlikely(ret < 0))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1423) 			break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1424) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1425) 	if (total)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1426) 		ret = total;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1427) out:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1428) 	for (; page_nr < cs.nr_segs; page_nr++)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1429) 		put_page(bufs[page_nr].page);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1430) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1431) 	kvfree(bufs);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1432) 	return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1433) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1434) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1435) static int fuse_notify_poll(struct fuse_conn *fc, unsigned int size,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1436) 			    struct fuse_copy_state *cs)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1437) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1438) 	struct fuse_notify_poll_wakeup_out outarg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1439) 	int err = -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1440) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1441) 	if (size != sizeof(outarg))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1442) 		goto err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1443) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1444) 	err = fuse_copy_one(cs, &outarg, sizeof(outarg));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1445) 	if (err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1446) 		goto err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1447) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1448) 	fuse_copy_finish(cs);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1449) 	return fuse_notify_poll_wakeup(fc, &outarg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1450) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1451) err:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1452) 	fuse_copy_finish(cs);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1453) 	return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1454) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1455) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1456) static int fuse_notify_inval_inode(struct fuse_conn *fc, unsigned int size,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1457) 				   struct fuse_copy_state *cs)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1458) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1459) 	struct fuse_notify_inval_inode_out outarg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1460) 	int err = -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1461) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1462) 	if (size != sizeof(outarg))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1463) 		goto err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1464) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1465) 	err = fuse_copy_one(cs, &outarg, sizeof(outarg));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1466) 	if (err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1467) 		goto err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1468) 	fuse_copy_finish(cs);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1469) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1470) 	down_read(&fc->killsb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1471) 	err = fuse_reverse_inval_inode(fc, outarg.ino,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1472) 				       outarg.off, outarg.len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1473) 	up_read(&fc->killsb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1474) 	return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1475) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1476) err:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1477) 	fuse_copy_finish(cs);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1478) 	return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1479) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1480) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1481) static int fuse_notify_inval_entry(struct fuse_conn *fc, unsigned int size,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1482) 				   struct fuse_copy_state *cs)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1483) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1484) 	struct fuse_notify_inval_entry_out outarg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1485) 	int err = -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1486) 	char *buf;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1487) 	struct qstr name;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1488) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1489) 	buf = kzalloc(FUSE_NAME_MAX + 1, GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1490) 	if (!buf)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1491) 		goto err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1492) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1493) 	err = -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1494) 	if (size < sizeof(outarg))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1495) 		goto err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1496) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1497) 	err = fuse_copy_one(cs, &outarg, sizeof(outarg));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1498) 	if (err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1499) 		goto err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1500) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1501) 	err = -ENAMETOOLONG;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1502) 	if (outarg.namelen > FUSE_NAME_MAX)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1503) 		goto err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1504) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1505) 	err = -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1506) 	if (size != sizeof(outarg) + outarg.namelen + 1)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1507) 		goto err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1508) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1509) 	name.name = buf;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1510) 	name.len = outarg.namelen;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1511) 	err = fuse_copy_one(cs, buf, outarg.namelen + 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1512) 	if (err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1513) 		goto err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1514) 	fuse_copy_finish(cs);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1515) 	buf[outarg.namelen] = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1516) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1517) 	down_read(&fc->killsb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1518) 	err = fuse_reverse_inval_entry(fc, outarg.parent, 0, &name);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1519) 	up_read(&fc->killsb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1520) 	kfree(buf);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1521) 	return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1522) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1523) err:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1524) 	kfree(buf);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1525) 	fuse_copy_finish(cs);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1526) 	return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1527) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1528) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1529) static int fuse_notify_delete(struct fuse_conn *fc, unsigned int size,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1530) 			      struct fuse_copy_state *cs)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1531) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1532) 	struct fuse_notify_delete_out outarg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1533) 	int err = -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1534) 	char *buf;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1535) 	struct qstr name;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1536) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1537) 	buf = kzalloc(FUSE_NAME_MAX + 1, GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1538) 	if (!buf)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1539) 		goto err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1540) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1541) 	err = -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1542) 	if (size < sizeof(outarg))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1543) 		goto err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1544) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1545) 	err = fuse_copy_one(cs, &outarg, sizeof(outarg));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1546) 	if (err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1547) 		goto err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1548) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1549) 	err = -ENAMETOOLONG;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1550) 	if (outarg.namelen > FUSE_NAME_MAX)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1551) 		goto err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1552) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1553) 	err = -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1554) 	if (size != sizeof(outarg) + outarg.namelen + 1)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1555) 		goto err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1556) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1557) 	name.name = buf;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1558) 	name.len = outarg.namelen;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1559) 	err = fuse_copy_one(cs, buf, outarg.namelen + 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1560) 	if (err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1561) 		goto err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1562) 	fuse_copy_finish(cs);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1563) 	buf[outarg.namelen] = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1564) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1565) 	down_read(&fc->killsb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1566) 	err = fuse_reverse_inval_entry(fc, outarg.parent, outarg.child, &name);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1567) 	up_read(&fc->killsb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1568) 	kfree(buf);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1569) 	return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1570) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1571) err:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1572) 	kfree(buf);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1573) 	fuse_copy_finish(cs);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1574) 	return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1575) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1576) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1577) static int fuse_notify_store(struct fuse_conn *fc, unsigned int size,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1578) 			     struct fuse_copy_state *cs)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1579) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1580) 	struct fuse_notify_store_out outarg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1581) 	struct inode *inode;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1582) 	struct address_space *mapping;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1583) 	u64 nodeid;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1584) 	int err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1585) 	pgoff_t index;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1586) 	unsigned int offset;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1587) 	unsigned int num;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1588) 	loff_t file_size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1589) 	loff_t end;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1590) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1591) 	err = -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1592) 	if (size < sizeof(outarg))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1593) 		goto out_finish;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1594) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1595) 	err = fuse_copy_one(cs, &outarg, sizeof(outarg));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1596) 	if (err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1597) 		goto out_finish;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1598) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1599) 	err = -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1600) 	if (size - sizeof(outarg) != outarg.size)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1601) 		goto out_finish;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1602) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1603) 	nodeid = outarg.nodeid;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1604) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1605) 	down_read(&fc->killsb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1606) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1607) 	err = -ENOENT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1608) 	inode = fuse_ilookup(fc, nodeid,  NULL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1609) 	if (!inode)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1610) 		goto out_up_killsb;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1611) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1612) 	mapping = inode->i_mapping;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1613) 	index = outarg.offset >> PAGE_SHIFT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1614) 	offset = outarg.offset & ~PAGE_MASK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1615) 	file_size = i_size_read(inode);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1616) 	end = outarg.offset + outarg.size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1617) 	if (end > file_size) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1618) 		file_size = end;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1619) 		fuse_write_update_size(inode, file_size);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1620) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1621) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1622) 	num = outarg.size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1623) 	while (num) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1624) 		struct page *page;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1625) 		unsigned int this_num;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1626) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1627) 		err = -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1628) 		page = find_or_create_page(mapping, index,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1629) 					   mapping_gfp_mask(mapping));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1630) 		if (!page)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1631) 			goto out_iput;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1632) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1633) 		this_num = min_t(unsigned, num, PAGE_SIZE - offset);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1634) 		err = fuse_copy_page(cs, &page, offset, this_num, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1635) 		if (!err && offset == 0 &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1636) 		    (this_num == PAGE_SIZE || file_size == end))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1637) 			SetPageUptodate(page);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1638) 		unlock_page(page);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1639) 		put_page(page);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1640) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1641) 		if (err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1642) 			goto out_iput;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1643) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1644) 		num -= this_num;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1645) 		offset = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1646) 		index++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1647) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1648) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1649) 	err = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1650) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1651) out_iput:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1652) 	iput(inode);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1653) out_up_killsb:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1654) 	up_read(&fc->killsb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1655) out_finish:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1656) 	fuse_copy_finish(cs);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1657) 	return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1658) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1659) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1660) struct fuse_retrieve_args {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1661) 	struct fuse_args_pages ap;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1662) 	struct fuse_notify_retrieve_in inarg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1663) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1664) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1665) static void fuse_retrieve_end(struct fuse_mount *fm, struct fuse_args *args,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1666) 			      int error)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1667) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1668) 	struct fuse_retrieve_args *ra =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1669) 		container_of(args, typeof(*ra), ap.args);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1670) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1671) 	release_pages(ra->ap.pages, ra->ap.num_pages);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1672) 	kfree(ra);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1673) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1674) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1675) static int fuse_retrieve(struct fuse_mount *fm, struct inode *inode,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1676) 			 struct fuse_notify_retrieve_out *outarg)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1677) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1678) 	int err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1679) 	struct address_space *mapping = inode->i_mapping;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1680) 	pgoff_t index;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1681) 	loff_t file_size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1682) 	unsigned int num;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1683) 	unsigned int offset;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1684) 	size_t total_len = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1685) 	unsigned int num_pages;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1686) 	struct fuse_conn *fc = fm->fc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1687) 	struct fuse_retrieve_args *ra;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1688) 	size_t args_size = sizeof(*ra);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1689) 	struct fuse_args_pages *ap;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1690) 	struct fuse_args *args;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1691) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1692) 	offset = outarg->offset & ~PAGE_MASK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1693) 	file_size = i_size_read(inode);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1694) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1695) 	num = min(outarg->size, fc->max_write);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1696) 	if (outarg->offset > file_size)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1697) 		num = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1698) 	else if (outarg->offset + num > file_size)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1699) 		num = file_size - outarg->offset;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1700) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1701) 	num_pages = (num + offset + PAGE_SIZE - 1) >> PAGE_SHIFT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1702) 	num_pages = min(num_pages, fc->max_pages);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1703) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1704) 	args_size += num_pages * (sizeof(ap->pages[0]) + sizeof(ap->descs[0]));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1705) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1706) 	ra = kzalloc(args_size, GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1707) 	if (!ra)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1708) 		return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1709) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1710) 	ap = &ra->ap;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1711) 	ap->pages = (void *) (ra + 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1712) 	ap->descs = (void *) (ap->pages + num_pages);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1713) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1714) 	args = &ap->args;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1715) 	args->nodeid = outarg->nodeid;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1716) 	args->opcode = FUSE_NOTIFY_REPLY;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1717) 	args->in_numargs = 2;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1718) 	args->in_pages = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1719) 	args->end = fuse_retrieve_end;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1720) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1721) 	index = outarg->offset >> PAGE_SHIFT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1722) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1723) 	while (num && ap->num_pages < num_pages) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1724) 		struct page *page;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1725) 		unsigned int this_num;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1726) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1727) 		page = find_get_page(mapping, index);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1728) 		if (!page)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1729) 			break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1730) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1731) 		this_num = min_t(unsigned, num, PAGE_SIZE - offset);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1732) 		ap->pages[ap->num_pages] = page;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1733) 		ap->descs[ap->num_pages].offset = offset;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1734) 		ap->descs[ap->num_pages].length = this_num;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1735) 		ap->num_pages++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1736) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1737) 		offset = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1738) 		num -= this_num;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1739) 		total_len += this_num;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1740) 		index++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1741) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1742) 	ra->inarg.offset = outarg->offset;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1743) 	ra->inarg.size = total_len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1744) 	args->in_args[0].size = sizeof(ra->inarg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1745) 	args->in_args[0].value = &ra->inarg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1746) 	args->in_args[1].size = total_len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1747) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1748) 	err = fuse_simple_notify_reply(fm, args, outarg->notify_unique);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1749) 	if (err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1750) 		fuse_retrieve_end(fm, args, err);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1751) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1752) 	return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1753) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1754) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1755) static int fuse_notify_retrieve(struct fuse_conn *fc, unsigned int size,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1756) 				struct fuse_copy_state *cs)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1757) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1758) 	struct fuse_notify_retrieve_out outarg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1759) 	struct fuse_mount *fm;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1760) 	struct inode *inode;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1761) 	u64 nodeid;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1762) 	int err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1763) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1764) 	err = -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1765) 	if (size != sizeof(outarg))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1766) 		goto copy_finish;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1767) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1768) 	err = fuse_copy_one(cs, &outarg, sizeof(outarg));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1769) 	if (err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1770) 		goto copy_finish;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1771) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1772) 	fuse_copy_finish(cs);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1773) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1774) 	down_read(&fc->killsb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1775) 	err = -ENOENT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1776) 	nodeid = outarg.nodeid;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1777) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1778) 	inode = fuse_ilookup(fc, nodeid, &fm);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1779) 	if (inode) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1780) 		err = fuse_retrieve(fm, inode, &outarg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1781) 		iput(inode);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1782) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1783) 	up_read(&fc->killsb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1784) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1785) 	return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1786) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1787) copy_finish:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1788) 	fuse_copy_finish(cs);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1789) 	return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1790) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1791) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1792) static int fuse_notify(struct fuse_conn *fc, enum fuse_notify_code code,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1793) 		       unsigned int size, struct fuse_copy_state *cs)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1794) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1795) 	/* Don't try to move pages (yet) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1796) 	cs->move_pages = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1797) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1798) 	switch (code) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1799) 	case FUSE_NOTIFY_POLL:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1800) 		return fuse_notify_poll(fc, size, cs);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1801) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1802) 	case FUSE_NOTIFY_INVAL_INODE:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1803) 		return fuse_notify_inval_inode(fc, size, cs);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1804) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1805) 	case FUSE_NOTIFY_INVAL_ENTRY:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1806) 		return fuse_notify_inval_entry(fc, size, cs);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1807) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1808) 	case FUSE_NOTIFY_STORE:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1809) 		return fuse_notify_store(fc, size, cs);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1810) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1811) 	case FUSE_NOTIFY_RETRIEVE:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1812) 		return fuse_notify_retrieve(fc, size, cs);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1813) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1814) 	case FUSE_NOTIFY_DELETE:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1815) 		return fuse_notify_delete(fc, size, cs);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1816) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1817) 	default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1818) 		fuse_copy_finish(cs);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1819) 		return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1820) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1821) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1822) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1823) /* Look up request on processing list by unique ID */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1824) static struct fuse_req *request_find(struct fuse_pqueue *fpq, u64 unique)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1825) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1826) 	unsigned int hash = fuse_req_hash(unique);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1827) 	struct fuse_req *req;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1828) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1829) 	list_for_each_entry(req, &fpq->processing[hash], list) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1830) 		if (req->in.h.unique == unique)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1831) 			return req;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1832) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1833) 	return NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1834) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1835) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1836) static int copy_out_args(struct fuse_copy_state *cs, struct fuse_args *args,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1837) 			 unsigned nbytes)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1838) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1839) 	unsigned reqsize = sizeof(struct fuse_out_header);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1840) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1841) 	reqsize += fuse_len_args(args->out_numargs, args->out_args);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1842) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1843) 	if (reqsize < nbytes || (reqsize > nbytes && !args->out_argvar))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1844) 		return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1845) 	else if (reqsize > nbytes) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1846) 		struct fuse_arg *lastarg = &args->out_args[args->out_numargs-1];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1847) 		unsigned diffsize = reqsize - nbytes;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1848) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1849) 		if (diffsize > lastarg->size)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1850) 			return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1851) 		lastarg->size -= diffsize;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1852) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1853) 	return fuse_copy_args(cs, args->out_numargs, args->out_pages,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1854) 			      args->out_args, args->page_zeroing);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1855) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1856) 
/*
 * Write a single reply to a request.  First the header is copied from
 * the write buffer.  The request is then searched on the processing
 * list by the unique ID found in the header.  If found, then remove
 * it from the list and copy the rest of the buffer to the request.
 * The request is finished by calling fuse_request_end().
 *
 * Returns nbytes on success, or a negative errno:
 *   -EINVAL  malformed reply (short write, bad length, bad error code)
 *   -ENOENT  no matching request (e.g. it was aborted meanwhile)
 */
static ssize_t fuse_dev_do_write(struct fuse_dev *fud,
				 struct fuse_copy_state *cs, size_t nbytes)
{
	int err;
	struct fuse_conn *fc = fud->fc;
	struct fuse_pqueue *fpq = &fud->pq;
	struct fuse_req *req;
	struct fuse_out_header oh;

	/* A reply must at least contain a complete out header */
	err = -EINVAL;
	if (nbytes < sizeof(struct fuse_out_header))
		goto out;

	err = fuse_copy_one(cs, &oh, sizeof(oh));
	if (err)
		goto copy_finish;

	/* The header's length field must account for the whole write */
	err = -EINVAL;
	if (oh.len != nbytes)
		goto copy_finish;

	/*
	 * Zero oh.unique indicates unsolicited notification message
	 * and error contains notification code.
	 */
	if (!oh.unique) {
		err = fuse_notify(fc, oh.error, nbytes - sizeof(oh), cs);
		goto out;
	}

	/* oh.error must be zero or a valid negative errno (above -512) */
	err = -EINVAL;
	if (oh.error <= -512 || oh.error > 0)
		goto copy_finish;

	spin_lock(&fpq->lock);
	req = NULL;
	if (fpq->connected)
		req = request_find(fpq, oh.unique & ~FUSE_INT_REQ_BIT);

	err = -ENOENT;
	if (!req) {
		spin_unlock(&fpq->lock);
		goto copy_finish;
	}

	/* Is it an interrupt reply ID? */
	if (oh.unique & FUSE_INT_REQ_BIT) {
		/* Hold a ref so the request survives dropping fpq->lock */
		__fuse_get_request(req);
		spin_unlock(&fpq->lock);

		err = 0;
		if (nbytes != sizeof(struct fuse_out_header))
			err = -EINVAL;
		else if (oh.error == -ENOSYS)
			/* Daemon doesn't support interrupts; stop sending them */
			fc->no_interrupt = 1;
		else if (oh.error == -EAGAIN)
			/* Daemon asked for the interrupt to be requeued */
			err = queue_interrupt(req);

		fuse_put_request(req);

		goto copy_finish;
	}

	/*
	 * Move the request to the io list and mark it FR_LOCKED so an
	 * abort cannot free its pages while we copy into them.
	 */
	clear_bit(FR_SENT, &req->flags);
	list_move(&req->list, &fpq->io);
	req->out.h = oh;
	set_bit(FR_LOCKED, &req->flags);
	spin_unlock(&fpq->lock);
	cs->req = req;
	if (!req->args->page_replace)
		cs->move_pages = 0;

	if (oh.error)
		/* An error reply carries no payload, only the header */
		err = nbytes != sizeof(oh) ? -EINVAL : 0;
	else
		err = copy_out_args(cs, req->args, nbytes);
	fuse_copy_finish(cs);

	/*
	 * CANONICAL_PATH replies carry a path string that is resolved to
	 * a kernel path here (vendor/Android extension in this tree).
	 */
	if (!err && req->in.h.opcode == FUSE_CANONICAL_PATH) {
		char *path = (char *)req->args->out_args[0].value;

		/* Force NUL termination before handing to kern_path() */
		path[req->args->out_args[0].size - 1] = 0;
		if (req->out.h.error != -ENOSYS)
			req->out.h.error = kern_path(path, 0, req->args->canonical_path);
	}

	spin_lock(&fpq->lock);
	clear_bit(FR_LOCKED, &req->flags);
	if (!fpq->connected)
		err = -ENOENT;
	else if (err)
		/* Copy failed midway: report I/O error to the originator */
		req->out.h.error = -EIO;
	if (!test_bit(FR_PRIVATE, &req->flags))
		list_del_init(&req->list);
	spin_unlock(&fpq->lock);

	fuse_request_end(req);
out:
	return err ? err : nbytes;

copy_finish:
	fuse_copy_finish(cs);
	goto out;
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1968) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1969) static ssize_t fuse_dev_write(struct kiocb *iocb, struct iov_iter *from)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1970) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1971) 	struct fuse_copy_state cs;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1972) 	struct fuse_dev *fud = fuse_get_dev(iocb->ki_filp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1973) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1974) 	if (!fud)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1975) 		return -EPERM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1976) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1977) 	if (!iter_is_iovec(from))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1978) 		return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1979) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1980) 	fuse_copy_init(&cs, 0, from);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1981) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1982) 	return fuse_dev_do_write(fud, &cs, iov_iter_count(from));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1983) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1984) 
/*
 * splice(2) into the fuse device: steal (or reference) the pipe's
 * buffers into a private array, then feed them to fuse_dev_do_write()
 * as a zero-copy reply.  With SPLICE_F_MOVE the pages may be moved
 * directly into the page cache.
 */
static ssize_t fuse_dev_splice_write(struct pipe_inode_info *pipe,
				     struct file *out, loff_t *ppos,
				     size_t len, unsigned int flags)
{
	unsigned int head, tail, mask, count;
	unsigned nbuf;
	unsigned idx;
	struct pipe_buffer *bufs;
	struct fuse_copy_state cs;
	struct fuse_dev *fud;
	size_t rem;
	ssize_t ret;

	fud = fuse_get_dev(out);
	if (!fud)
		return -EPERM;

	pipe_lock(pipe);

	/* Snapshot the pipe ring state under pipe_lock */
	head = pipe->head;
	tail = pipe->tail;
	mask = pipe->ring_size - 1;
	count = head - tail;

	bufs = kvmalloc_array(count, sizeof(struct pipe_buffer), GFP_KERNEL);
	if (!bufs) {
		pipe_unlock(pipe);
		return -ENOMEM;
	}

	/* The pipe must hold at least len bytes, else the reply is short */
	nbuf = 0;
	rem = 0;
	for (idx = tail; idx != head && rem < len; idx++)
		rem += pipe->bufs[idx & mask].len;

	ret = -EINVAL;
	if (rem < len)
		goto out_free;

	/*
	 * Transfer up to len bytes worth of buffers out of the pipe:
	 * whole buffers are stolen (ops cleared, ring tail advanced);
	 * a final partial buffer is shared via an extra reference and
	 * split at the byte boundary.
	 */
	rem = len;
	while (rem) {
		struct pipe_buffer *ibuf;
		struct pipe_buffer *obuf;

		if (WARN_ON(nbuf >= count || tail == head))
			goto out_free;

		ibuf = &pipe->bufs[tail & mask];
		obuf = &bufs[nbuf];

		if (rem >= ibuf->len) {
			/* Take over the whole buffer; pipe loses ownership */
			*obuf = *ibuf;
			ibuf->ops = NULL;
			tail++;
			pipe->tail = tail;
		} else {
			if (!pipe_buf_get(pipe, ibuf))
				goto out_free;

			*obuf = *ibuf;
			/* Shared page: must not be treated as gifted */
			obuf->flags &= ~PIPE_BUF_FLAG_GIFT;
			obuf->len = rem;
			ibuf->offset += obuf->len;
			ibuf->len -= obuf->len;
		}
		nbuf++;
		rem -= obuf->len;
	}
	pipe_unlock(pipe);

	fuse_copy_init(&cs, 0, NULL);
	cs.pipebufs = bufs;
	cs.nr_segs = nbuf;
	cs.pipe = pipe;

	if (flags & SPLICE_F_MOVE)
		cs.move_pages = 1;

	ret = fuse_dev_do_write(fud, &cs, len);

	pipe_lock(pipe);
out_free:
	/* Release every buffer we still own (ops may have been consumed) */
	for (idx = 0; idx < nbuf; idx++) {
		struct pipe_buffer *buf = &bufs[idx];

		if (buf->ops)
			pipe_buf_release(pipe, buf);
	}
	pipe_unlock(pipe);

	kvfree(bufs);
	return ret;
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2078) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2079) static __poll_t fuse_dev_poll(struct file *file, poll_table *wait)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2080) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2081) 	__poll_t mask = EPOLLOUT | EPOLLWRNORM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2082) 	struct fuse_iqueue *fiq;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2083) 	struct fuse_dev *fud = fuse_get_dev(file);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2084) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2085) 	if (!fud)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2086) 		return EPOLLERR;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2087) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2088) 	fiq = &fud->fc->iq;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2089) 	poll_wait(file, &fiq->waitq, wait);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2090) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2091) 	spin_lock(&fiq->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2092) 	if (!fiq->connected)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2093) 		mask = EPOLLERR;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2094) 	else if (request_pending(fiq))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2095) 		mask |= EPOLLIN | EPOLLRDNORM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2096) 	spin_unlock(&fiq->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2097) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2098) 	return mask;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2099) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2100) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2101) /* Abort all requests on the given list (pending or processing) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2102) static void end_requests(struct list_head *head)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2103) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2104) 	while (!list_empty(head)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2105) 		struct fuse_req *req;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2106) 		req = list_entry(head->next, struct fuse_req, list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2107) 		req->out.h.error = -ECONNABORTED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2108) 		clear_bit(FR_SENT, &req->flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2109) 		list_del_init(&req->list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2110) 		fuse_request_end(req);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2111) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2112) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2113) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2114) static void end_polls(struct fuse_conn *fc)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2115) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2116) 	struct rb_node *p;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2117) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2118) 	p = rb_first(&fc->polled_files);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2119) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2120) 	while (p) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2121) 		struct fuse_file *ff;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2122) 		ff = rb_entry(p, struct fuse_file, polled_node);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2123) 		wake_up_interruptible_all(&ff->poll_wait);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2124) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2125) 		p = rb_next(p);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2126) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2127) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2128) 
/*
 * Abort all requests.
 *
 * Emergency exit in case of a malicious or accidental deadlock, or just a hung
 * filesystem.
 *
 * The same effect is usually achievable through killing the filesystem daemon
 * and all users of the filesystem.  The exception is the combination of an
 * asynchronous request and the tricky deadlock (see
 * Documentation/filesystems/fuse.rst).
 *
 * Aborting requests under I/O goes as follows: 1: Separate out unlocked
 * requests, they should be finished off immediately.  Locked requests will be
 * finished after unlock; see unlock_request(). 2: Finish off the unlocked
 * requests.  It is possible that some request will finish before we can.  This
 * is OK, the request will in that case be removed from the list before we touch
 * it.
 */
void fuse_abort_conn(struct fuse_conn *fc)
{
	struct fuse_iqueue *fiq = &fc->iq;

	spin_lock(&fc->lock);
	if (fc->connected) {
		struct fuse_dev *fud;
		struct fuse_req *req, *next;
		LIST_HEAD(to_end);
		unsigned int i;

		/* Background queuing checks fc->connected under bg_lock */
		spin_lock(&fc->bg_lock);
		fc->connected = 0;
		spin_unlock(&fc->bg_lock);

		/* Release anyone waiting in fuse_get_req() for INIT */
		fuse_set_initialized(fc);
		list_for_each_entry(fud, &fc->devices, entry) {
			struct fuse_pqueue *fpq = &fud->pq;

			spin_lock(&fpq->lock);
			fpq->connected = 0;
			/* Step 1: collect unlocked io requests (see above) */
			list_for_each_entry_safe(req, next, &fpq->io, list) {
				req->out.h.error = -ECONNABORTED;
				spin_lock(&req->waitq.lock);
				set_bit(FR_ABORTED, &req->flags);
				if (!test_bit(FR_LOCKED, &req->flags)) {
					/* FR_PRIVATE: the copier must not unlink it */
					set_bit(FR_PRIVATE, &req->flags);
					__fuse_get_request(req);
					list_move(&req->list, &to_end);
				}
				spin_unlock(&req->waitq.lock);
			}
			/* All processing requests are aborted unconditionally */
			for (i = 0; i < FUSE_PQ_HASH_SIZE; i++)
				list_splice_tail_init(&fpq->processing[i],
						      &to_end);
			spin_unlock(&fpq->lock);
		}
		/* Flush queued background requests so they get aborted too */
		spin_lock(&fc->bg_lock);
		fc->blocked = 0;
		fc->max_background = UINT_MAX;
		flush_bg_queue(fc);
		spin_unlock(&fc->bg_lock);

		spin_lock(&fiq->lock);
		fiq->connected = 0;
		list_for_each_entry(req, &fiq->pending, list)
			clear_bit(FR_PENDING, &req->flags);
		list_splice_tail_init(&fiq->pending, &to_end);
		/* Queued forgets are simply dropped */
		while (forget_pending(fiq))
			kfree(fuse_dequeue_forget(fiq, 1, NULL));
		wake_up_all(&fiq->waitq);
		spin_unlock(&fiq->lock);
		kill_fasync(&fiq->fasync, SIGIO, POLL_IN);
		end_polls(fc);
		wake_up_all(&fc->blocked_waitq);
		spin_unlock(&fc->lock);

		/* Step 2: finish the collected requests outside all locks */
		end_requests(&to_end);
	} else {
		spin_unlock(&fc->lock);
	}
}
EXPORT_SYMBOL_GPL(fuse_abort_conn);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2211) 
/*
 * Block until every request that was counted in fc->num_waiting has
 * been released, i.e. an aborted connection has fully drained.
 */
void fuse_wait_aborted(struct fuse_conn *fc)
{
	/* matches implicit memory barrier in fuse_drop_waiting() */
	smp_mb();
	wait_event(fc->blocked_waitq, atomic_read(&fc->num_waiting) == 0);
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2218) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2219) int fuse_dev_release(struct inode *inode, struct file *file)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2220) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2221) 	struct fuse_dev *fud = fuse_get_dev(file);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2222) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2223) 	if (fud) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2224) 		struct fuse_conn *fc = fud->fc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2225) 		struct fuse_pqueue *fpq = &fud->pq;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2226) 		LIST_HEAD(to_end);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2227) 		unsigned int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2228) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2229) 		spin_lock(&fpq->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2230) 		WARN_ON(!list_empty(&fpq->io));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2231) 		for (i = 0; i < FUSE_PQ_HASH_SIZE; i++)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2232) 			list_splice_init(&fpq->processing[i], &to_end);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2233) 		spin_unlock(&fpq->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2234) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2235) 		end_requests(&to_end);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2236) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2237) 		/* Are we the last open device? */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2238) 		if (atomic_dec_and_test(&fc->dev_count)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2239) 			WARN_ON(fc->iq.fasync != NULL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2240) 			fuse_abort_conn(fc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2241) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2242) 		fuse_dev_free(fud);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2243) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2244) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2245) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2246) EXPORT_SYMBOL_GPL(fuse_dev_release);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2247) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2248) static int fuse_dev_fasync(int fd, struct file *file, int on)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2249) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2250) 	struct fuse_dev *fud = fuse_get_dev(file);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2251) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2252) 	if (!fud)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2253) 		return -EPERM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2254) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2255) 	/* No locking - fasync_helper does its own locking */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2256) 	return fasync_helper(fd, file, on, &fud->fc->iq.fasync);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2257) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2258) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2259) static int fuse_device_clone(struct fuse_conn *fc, struct file *new)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2260) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2261) 	struct fuse_dev *fud;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2262) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2263) 	if (new->private_data)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2264) 		return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2265) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2266) 	fud = fuse_dev_alloc_install(fc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2267) 	if (!fud)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2268) 		return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2269) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2270) 	new->private_data = fud;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2271) 	atomic_inc(&fc->dev_count);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2272) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2273) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2274) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2275) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2276) static long fuse_dev_ioctl(struct file *file, unsigned int cmd,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2277) 			   unsigned long arg)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2278) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2279) 	int res;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2280) 	int oldfd;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2281) 	struct fuse_dev *fud = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2282) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2283) 	switch (cmd) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2284) 	case FUSE_DEV_IOC_CLONE:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2285) 		res = -EFAULT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2286) 		if (!get_user(oldfd, (__u32 __user *)arg)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2287) 			struct file *old = fget(oldfd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2288) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2289) 			res = -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2290) 			if (old) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2291) 				/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2292) 				 * Check against file->f_op because CUSE
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2293) 				 * uses the same ioctl handler.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2294) 				 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2295) 				if (old->f_op == file->f_op &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2296) 				    old->f_cred->user_ns ==
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2297) 					    file->f_cred->user_ns)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2298) 					fud = fuse_get_dev(old);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2299) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2300) 				if (fud) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2301) 					mutex_lock(&fuse_mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2302) 					res = fuse_device_clone(fud->fc, file);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2303) 					mutex_unlock(&fuse_mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2304) 				}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2305) 				fput(old);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2306) 			}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2307) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2308) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2309) 	case FUSE_DEV_IOC_PASSTHROUGH_OPEN:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2310) 		res = -EFAULT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2311) 		if (!get_user(oldfd, (__u32 __user *)arg)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2312) 			res = -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2313) 			fud = fuse_get_dev(file);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2314) 			if (fud)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2315) 				res = fuse_passthrough_open(fud, oldfd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2316) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2317) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2318) 	default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2319) 		res = -ENOTTY;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2320) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2321) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2322) 	return res;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2323) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2324) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2325) const struct file_operations fuse_dev_operations = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2326) 	.owner		= THIS_MODULE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2327) 	.open		= fuse_dev_open,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2328) 	.llseek		= no_llseek,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2329) 	.read_iter	= fuse_dev_read,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2330) 	.splice_read	= fuse_dev_splice_read,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2331) 	.write_iter	= fuse_dev_write,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2332) 	.splice_write	= fuse_dev_splice_write,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2333) 	.poll		= fuse_dev_poll,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2334) 	.release	= fuse_dev_release,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2335) 	.fasync		= fuse_dev_fasync,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2336) 	.unlocked_ioctl = fuse_dev_ioctl,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2337) 	.compat_ioctl   = compat_ptr_ioctl,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2338) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2339) EXPORT_SYMBOL_GPL(fuse_dev_operations);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2340) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2341) static struct miscdevice fuse_miscdevice = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2342) 	.minor = FUSE_MINOR,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2343) 	.name  = "fuse",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2344) 	.fops = &fuse_dev_operations,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2345) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2346) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2347) int __init fuse_dev_init(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2348) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2349) 	int err = -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2350) 	fuse_req_cachep = kmem_cache_create("fuse_request",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2351) 					    sizeof(struct fuse_req),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2352) 					    0, 0, NULL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2353) 	if (!fuse_req_cachep)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2354) 		goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2355) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2356) 	err = misc_register(&fuse_miscdevice);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2357) 	if (err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2358) 		goto out_cache_clean;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2359) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2360) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2361) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2362)  out_cache_clean:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2363) 	kmem_cache_destroy(fuse_req_cachep);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2364)  out:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2365) 	return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2366) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2367) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2368) void fuse_dev_cleanup(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2369) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2370) 	misc_deregister(&fuse_miscdevice);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2371) 	kmem_cache_destroy(fuse_req_cachep);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2372) }