/*
  FUSE: Filesystem in Userspace
  Copyright (C) 2001-2008  Miklos Szeredi <miklos@szeredi.hu>

  This program can be distributed under the terms of the GNU GPL.
  See the file COPYING.
*/

#include "fuse_i.h"

#include <linux/pagemap.h>
#include <linux/slab.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/sched/signal.h>
#include <linux/module.h>
#include <linux/compat.h>
#include <linux/swap.h>
#include <linux/falloc.h>
#include <linux/uio.h>
#include <linux/fs.h>

static struct page **fuse_pages_alloc(unsigned int npages, gfp_t flags,
				      struct fuse_page_desc **desc)
{
	struct page **pages;

	pages = kzalloc(npages * (sizeof(struct page *) +
				  sizeof(struct fuse_page_desc)), flags);
	*desc = (void *) (pages + npages);

	return pages;
}
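
/*
 * Layout note (editorial sketch, not upstream documentation): the call
 * above makes a single allocation that carries both arrays, so one
 * kfree(pages) in the caller releases everything:
 *
 *	[ page *0 ... page *npages-1 | desc 0 ... desc npages-1 ]
 *	^ pages                        ^ *desc
 *
 * Callers must still check the returned pointer for NULL before using
 * *desc, since kzalloc() can fail and *desc is then derived from a NULL
 * base.
 */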

static int fuse_send_open(struct fuse_mount *fm, u64 nodeid, struct file *file,
			  int opcode, struct fuse_open_out *outargp)
{
	struct fuse_open_in inarg;
	FUSE_ARGS(args);

	memset(&inarg, 0, sizeof(inarg));
	inarg.flags = file->f_flags & ~(O_CREAT | O_EXCL | O_NOCTTY);
	if (!fm->fc->atomic_o_trunc)
		inarg.flags &= ~O_TRUNC;
	args.opcode = opcode;
	args.nodeid = nodeid;
	args.in_numargs = 1;
	args.in_args[0].size = sizeof(inarg);
	args.in_args[0].value = &inarg;
	args.out_numargs = 1;
	args.out_args[0].size = sizeof(*outargp);
	args.out_args[0].value = outargp;

	return fuse_simple_request(fm, &args);
}
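
/*
 * A note on the flag filtering above (summary, not normative): O_CREAT
 * and O_EXCL are masked out because file creation is negotiated through
 * separate requests (FUSE_CREATE/FUSE_MKNOD plus open), and O_NOCTTY is
 * purely a VFS/tty-side concern, so the server only sees flags that are
 * meaningful for an already-existing file.  O_TRUNC is passed through
 * only when the server advertised atomic_o_trunc and thereby accepted
 * responsibility for the truncation itself.
 */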

struct fuse_release_args {
	struct fuse_args args;
	struct fuse_release_in inarg;
	struct inode *inode;
};

struct fuse_file *fuse_file_alloc(struct fuse_mount *fm)
{
	struct fuse_file *ff;

	ff = kzalloc(sizeof(struct fuse_file), GFP_KERNEL_ACCOUNT);
	if (unlikely(!ff))
		return NULL;

	ff->fm = fm;
	ff->release_args = kzalloc(sizeof(*ff->release_args),
				   GFP_KERNEL_ACCOUNT);
	if (!ff->release_args) {
		kfree(ff);
		return NULL;
	}

	INIT_LIST_HEAD(&ff->write_entry);
	mutex_init(&ff->readdir.lock);
	refcount_set(&ff->count, 1);
	RB_CLEAR_NODE(&ff->polled_node);
	init_waitqueue_head(&ff->poll_wait);

	ff->kh = atomic64_inc_return(&fm->fc->khctr);

	return ff;
}

void fuse_file_free(struct fuse_file *ff)
{
	kfree(ff->release_args);
	mutex_destroy(&ff->readdir.lock);
	kfree(ff);
}

static struct fuse_file *fuse_file_get(struct fuse_file *ff)
{
	refcount_inc(&ff->count);
	return ff;
}

static void fuse_release_end(struct fuse_mount *fm, struct fuse_args *args,
			     int error)
{
	struct fuse_release_args *ra = container_of(args, typeof(*ra), args);

	iput(ra->inode);
	kfree(ra);
}

static void fuse_file_put(struct fuse_file *ff, bool sync, bool isdir)
{
	if (refcount_dec_and_test(&ff->count)) {
		struct fuse_args *args = &ff->release_args->args;

		if (isdir ? ff->fm->fc->no_opendir : ff->fm->fc->no_open) {
			/* Do nothing when client does not implement 'open' */
			fuse_release_end(ff->fm, args, 0);
		} else if (sync) {
			fuse_simple_request(ff->fm, args);
			fuse_release_end(ff->fm, args, 0);
		} else {
			args->end = fuse_release_end;
			if (fuse_simple_background(ff->fm, args,
						   GFP_KERNEL | __GFP_NOFAIL))
				fuse_release_end(ff->fm, args, -ENOTCONN);
		}
		kfree(ff);
	}
}
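
/*
 * The three release paths above, in short (informational comment):
 *
 *  - no_open/no_opendir: the server never saw an OPEN, so no RELEASE is
 *    sent and only the local cleanup in fuse_release_end() runs;
 *  - sync: the RELEASE request is sent and waited for before the file
 *    is freed;
 *  - async: the request is queued in the background (with __GFP_NOFAIL,
 *    so the queueing allocation cannot fail) and fuse_release_end()
 *    runs from the ->end callback; -ENOTCONN means the connection is
 *    gone and the cleanup is done directly instead.
 */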

int fuse_do_open(struct fuse_mount *fm, u64 nodeid, struct file *file,
		 bool isdir)
{
	struct fuse_conn *fc = fm->fc;
	struct fuse_file *ff;
	int opcode = isdir ? FUSE_OPENDIR : FUSE_OPEN;

	ff = fuse_file_alloc(fm);
	if (!ff)
		return -ENOMEM;

	ff->fh = 0;
	/* Default for no-open */
	ff->open_flags = FOPEN_KEEP_CACHE | (isdir ? FOPEN_CACHE_DIR : 0);
	if (isdir ? !fc->no_opendir : !fc->no_open) {
		struct fuse_open_out outarg;
		int err;

		err = fuse_send_open(fm, nodeid, file, opcode, &outarg);
		if (!err) {
			ff->fh = outarg.fh;
			ff->open_flags = outarg.open_flags;
			fuse_passthrough_setup(fc, ff, &outarg);
		} else if (err != -ENOSYS) {
			fuse_file_free(ff);
			return err;
		} else {
			if (isdir)
				fc->no_opendir = 1;
			else
				fc->no_open = 1;
		}
	}

	if (isdir)
		ff->open_flags &= ~FOPEN_DIRECT_IO;

	ff->nodeid = nodeid;
	file->private_data = ff;

	return 0;
}
EXPORT_SYMBOL_GPL(fuse_do_open);

static void fuse_link_write_file(struct file *file)
{
	struct inode *inode = file_inode(file);
	struct fuse_inode *fi = get_fuse_inode(inode);
	struct fuse_file *ff = file->private_data;
	/*
	 * file may be written through mmap, so chain it onto the
	 * inode's write_files list
	 */
	spin_lock(&fi->lock);
	if (list_empty(&ff->write_entry))
		list_add(&ff->write_entry, &fi->write_files);
	spin_unlock(&fi->lock);
}

void fuse_finish_open(struct inode *inode, struct file *file)
{
	struct fuse_file *ff = file->private_data;
	struct fuse_conn *fc = get_fuse_conn(inode);

	if (ff->open_flags & FOPEN_STREAM)
		stream_open(inode, file);
	else if (ff->open_flags & FOPEN_NONSEEKABLE)
		nonseekable_open(inode, file);

	if (fc->atomic_o_trunc && (file->f_flags & O_TRUNC)) {
		struct fuse_inode *fi = get_fuse_inode(inode);

		spin_lock(&fi->lock);
		fi->attr_version = atomic64_inc_return(&fc->attr_version);
		i_size_write(inode, 0);
		spin_unlock(&fi->lock);
		truncate_pagecache(inode, 0);
		fuse_invalidate_attr(inode);
		if (fc->writeback_cache)
			file_update_time(file);
	} else if (!(ff->open_flags & FOPEN_KEEP_CACHE)) {
		invalidate_inode_pages2(inode->i_mapping);
	}

	if ((file->f_mode & FMODE_WRITE) && fc->writeback_cache)
		fuse_link_write_file(file);
}

int fuse_open_common(struct inode *inode, struct file *file, bool isdir)
{
	struct fuse_mount *fm = get_fuse_mount(inode);
	struct fuse_conn *fc = fm->fc;
	int err;
	bool is_wb_truncate = (file->f_flags & O_TRUNC) &&
			      fc->atomic_o_trunc &&
			      fc->writeback_cache;
	bool dax_truncate = (file->f_flags & O_TRUNC) &&
			    fc->atomic_o_trunc && FUSE_IS_DAX(inode);

	if (fuse_is_bad(inode))
		return -EIO;

	err = generic_file_open(inode, file);
	if (err)
		return err;

	if (is_wb_truncate || dax_truncate) {
		inode_lock(inode);
		fuse_set_nowrite(inode);
	}

	if (dax_truncate) {
		down_write(&get_fuse_inode(inode)->i_mmap_sem);
		err = fuse_dax_break_layouts(inode, 0, 0);
		if (err)
			goto out;
	}

	err = fuse_do_open(fm, get_node_id(inode), file, isdir);
	if (!err)
		fuse_finish_open(inode, file);

out:
	if (dax_truncate)
		up_write(&get_fuse_inode(inode)->i_mmap_sem);

	if (is_wb_truncate || dax_truncate) {
		fuse_release_nowrite(inode);
		inode_unlock(inode);
	}

	return err;
}
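
/*
 * Locking sketch for the truncate-on-open paths above (summary): the
 * inode lock is taken first, then writes are frozen via FUSE_NOWRITE,
 * and for DAX the mapping semaphore is taken last so that
 * fuse_dax_break_layouts() cannot race with new faults.  Teardown runs
 * in exact reverse order, and plain opens bypass all of it.
 */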

static void fuse_prepare_release(struct fuse_inode *fi, struct fuse_file *ff,
				 int flags, int opcode)
{
	struct fuse_conn *fc = ff->fm->fc;
	struct fuse_release_args *ra = ff->release_args;

	/* Inode is NULL on error path of fuse_create_open() */
	if (likely(fi)) {
		spin_lock(&fi->lock);
		list_del(&ff->write_entry);
		spin_unlock(&fi->lock);
	}
	spin_lock(&fc->lock);
	if (!RB_EMPTY_NODE(&ff->polled_node))
		rb_erase(&ff->polled_node, &fc->polled_files);
	spin_unlock(&fc->lock);

	wake_up_interruptible_all(&ff->poll_wait);

	ra->inarg.fh = ff->fh;
	ra->inarg.flags = flags;
	ra->args.in_numargs = 1;
	ra->args.in_args[0].size = sizeof(struct fuse_release_in);
	ra->args.in_args[0].value = &ra->inarg;
	ra->args.opcode = opcode;
	ra->args.nodeid = ff->nodeid;
	ra->args.force = true;
	ra->args.nocreds = true;
}

void fuse_release_common(struct file *file, bool isdir)
{
	struct fuse_inode *fi = get_fuse_inode(file_inode(file));
	struct fuse_file *ff = file->private_data;
	struct fuse_release_args *ra = ff->release_args;
	int opcode = isdir ? FUSE_RELEASEDIR : FUSE_RELEASE;

	fuse_passthrough_release(&ff->passthrough);

	fuse_prepare_release(fi, ff, file->f_flags, opcode);

	if (ff->flock) {
		ra->inarg.release_flags |= FUSE_RELEASE_FLOCK_UNLOCK;
		ra->inarg.lock_owner = fuse_lock_owner_id(ff->fm->fc,
							  (fl_owner_t) file);
	}
	/* Hold inode until release is finished */
	ra->inode = igrab(file_inode(file));

	/*
	 * Normally this will send the RELEASE request; however, if
	 * some asynchronous READ or WRITE requests are outstanding,
	 * the sending will be delayed.
	 *
	 * Make the release synchronous if this is a fuseblk mount:
	 * synchronous RELEASE is allowed (and desirable) in this case
	 * because the server can be trusted not to screw up.
	 */
	fuse_file_put(ff, ff->fm->fc->destroy, isdir);
}

static int fuse_open(struct inode *inode, struct file *file)
{
	return fuse_open_common(inode, file, false);
}

static int fuse_release(struct inode *inode, struct file *file)
{
	struct fuse_conn *fc = get_fuse_conn(inode);

	/* see fuse_vma_close() for !writeback_cache case */
	if (fc->writeback_cache)
		write_inode_now(inode, 1);

	fuse_release_common(file, false);

	/* return value is ignored by VFS */
	return 0;
}

void fuse_sync_release(struct fuse_inode *fi, struct fuse_file *ff, int flags)
{
	WARN_ON(refcount_read(&ff->count) > 1);
	fuse_prepare_release(fi, ff, flags, FUSE_RELEASE);
	/*
	 * iput(NULL) is a no-op and since the refcount is 1 and everything's
	 * synchronous, we are fine with not doing igrab() here
	 */
	fuse_file_put(ff, true, false);
}
EXPORT_SYMBOL_GPL(fuse_sync_release);

/*
 * Scramble the ID space with XTEA, so that the value of the files_struct
 * pointer is not exposed to userspace.
 */
u64 fuse_lock_owner_id(struct fuse_conn *fc, fl_owner_t id)
{
	u32 *k = fc->scramble_key;
	u64 v = (unsigned long) id;
	u32 v0 = v;
	u32 v1 = v >> 32;
	u32 sum = 0;
	int i;

	for (i = 0; i < 32; i++) {
		v0 += ((v1 << 4 ^ v1 >> 5) + v1) ^ (sum + k[sum & 3]);
		sum += 0x9E3779B9;
		v1 += ((v0 << 4 ^ v0 >> 5) + v0) ^ (sum + k[sum >> 11 & 3]);
	}

	return (u64) v0 + ((u64) v1 << 32);
}
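
/*
 * Implementation note (editorial): the loop above is a standard XTEA
 * encipher, 32 iterations of the double round below over the two 32-bit
 * halves of the pointer, keyed by the random per-connection
 * scramble_key and using the usual delta of 0x9E3779B9:
 *
 *	v0 += ((v1 << 4 ^ v1 >> 5) + v1) ^ (sum + k[sum & 3]);
 *	sum += delta;
 *	v1 += ((v0 << 4 ^ v0 >> 5) + v0) ^ (sum + k[sum >> 11 & 3]);
 *
 * The result is a stable opaque token: the same owner always maps to
 * the same id on a given connection, but the kernel pointer cannot be
 * recovered without the key.
 */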

struct fuse_writepage_args {
	struct fuse_io_args ia;
	struct rb_node writepages_entry;
	struct list_head queue_entry;
	struct fuse_writepage_args *next;
	struct inode *inode;
};

static struct fuse_writepage_args *fuse_find_writeback(struct fuse_inode *fi,
						pgoff_t idx_from, pgoff_t idx_to)
{
	struct rb_node *n;

	n = fi->writepages.rb_node;

	while (n) {
		struct fuse_writepage_args *wpa;
		pgoff_t curr_index;

		wpa = rb_entry(n, struct fuse_writepage_args, writepages_entry);
		WARN_ON(get_fuse_inode(wpa->inode) != fi);
		curr_index = wpa->ia.write.in.offset >> PAGE_SHIFT;
		if (idx_from >= curr_index + wpa->ia.ap.num_pages)
			n = n->rb_right;
		else if (idx_to < curr_index)
			n = n->rb_left;
		else
			return wpa;
	}
	return NULL;
}
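
/*
 * The walk above is an interval lookup on an rb-tree keyed by each
 * request's first page index: a request that ends before idx_from sends
 * the search right, one that starts after idx_to sends it left, and
 * anything else overlaps [idx_from, idx_to].  Note that it returns an
 * arbitrary overlapping request, not necessarily the lowest one.
 */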

/*
 * Check if any page in a range is under writeback
 *
 * This is currently done by searching the rb-tree of writepage requests
 * for the inode.
 */
static bool fuse_range_is_writeback(struct inode *inode, pgoff_t idx_from,
				    pgoff_t idx_to)
{
	struct fuse_inode *fi = get_fuse_inode(inode);
	bool found;

	spin_lock(&fi->lock);
	found = fuse_find_writeback(fi, idx_from, idx_to);
	spin_unlock(&fi->lock);

	return found;
}

static inline bool fuse_page_is_writeback(struct inode *inode, pgoff_t index)
{
	return fuse_range_is_writeback(inode, index, index);
}

/*
 * Wait for page writeback to be completed.
 *
 * Since fuse doesn't rely on the VM writeback tracking, this has to
 * use some other means.
 */
static void fuse_wait_on_page_writeback(struct inode *inode, pgoff_t index)
{
	struct fuse_inode *fi = get_fuse_inode(inode);

	wait_event(fi->page_waitq, !fuse_page_is_writeback(inode, index));
}

/*
 * Wait for all pending writepages on the inode to finish.
 *
 * This is currently done by blocking further writes with FUSE_NOWRITE
 * and waiting for all sent writes to complete.
 *
 * This must be called under i_mutex, otherwise the FUSE_NOWRITE usage
 * could conflict with truncation.
 */
static void fuse_sync_writes(struct inode *inode)
{
	fuse_set_nowrite(inode);
	fuse_release_nowrite(inode);
}
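
/*
 * Roughly how the pair above waits (see the helpers in dir.c for the
 * authoritative version): fuse_set_nowrite() parks the inode in a state
 * where no new writepage requests may be queued and sleeps until the
 * in-flight ones have drained, and fuse_release_nowrite() then
 * re-enables writes and wakes any blocked writers.
 */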

static int fuse_flush(struct file *file, fl_owner_t id)
{
	struct inode *inode = file_inode(file);
	struct fuse_mount *fm = get_fuse_mount(inode);
	struct fuse_file *ff = file->private_data;
	struct fuse_flush_in inarg;
	FUSE_ARGS(args);
	int err;

	if (fuse_is_bad(inode))
		return -EIO;

	err = write_inode_now(inode, 1);
	if (err)
		return err;

	inode_lock(inode);
	fuse_sync_writes(inode);
	inode_unlock(inode);

	err = filemap_check_errors(file->f_mapping);
	if (err)
		return err;

	err = 0;
	if (fm->fc->no_flush)
		goto inval_attr_out;

	memset(&inarg, 0, sizeof(inarg));
	inarg.fh = ff->fh;
	inarg.lock_owner = fuse_lock_owner_id(fm->fc, id);
	args.opcode = FUSE_FLUSH;
	args.nodeid = get_node_id(inode);
	args.in_numargs = 1;
	args.in_args[0].size = sizeof(inarg);
	args.in_args[0].value = &inarg;
	args.force = true;

	err = fuse_simple_request(fm, &args);
	if (err == -ENOSYS) {
		fm->fc->no_flush = 1;
		err = 0;
	}

inval_attr_out:
	/*
	 * In-memory i_blocks is not maintained by fuse; if writeback cache
	 * is enabled, i_blocks from the cached attr may not be accurate.
	 */
	if (!err && fm->fc->writeback_cache)
		fuse_invalidate_attr(inode);
	return err;
}

int fuse_fsync_common(struct file *file, loff_t start, loff_t end,
		      int datasync, int opcode)
{
	struct inode *inode = file->f_mapping->host;
	struct fuse_mount *fm = get_fuse_mount(inode);
	struct fuse_file *ff = file->private_data;
	FUSE_ARGS(args);
	struct fuse_fsync_in inarg;

	memset(&inarg, 0, sizeof(inarg));
	inarg.fh = ff->fh;
	inarg.fsync_flags = datasync ? FUSE_FSYNC_FDATASYNC : 0;
	args.opcode = opcode;
	args.nodeid = get_node_id(inode);
	args.in_numargs = 1;
	args.in_args[0].size = sizeof(inarg);
	args.in_args[0].value = &inarg;
	return fuse_simple_request(fm, &args);
}

static int fuse_fsync(struct file *file, loff_t start, loff_t end,
		      int datasync)
{
	struct inode *inode = file->f_mapping->host;
	struct fuse_conn *fc = get_fuse_conn(inode);
	int err;

	if (fuse_is_bad(inode))
		return -EIO;

	inode_lock(inode);

	/*
	 * Start writeback against all dirty pages of the inode, then
	 * wait for all outstanding writes, before sending the FSYNC
	 * request.
	 */
	err = file_write_and_wait_range(file, start, end);
	if (err)
		goto out;

	fuse_sync_writes(inode);

	/*
	 * Due to the implementation of fuse writeback,
	 * file_write_and_wait_range() does not catch errors.
	 * We have to do this directly after fuse_sync_writes().
	 */
	err = file_check_and_advance_wb_err(file);
	if (err)
		goto out;

	err = sync_inode_metadata(inode, 1);
	if (err)
		goto out;

	if (fc->no_fsync)
		goto out;

	err = fuse_fsync_common(file, start, end, datasync, FUSE_FSYNC);
	if (err == -ENOSYS) {
		fc->no_fsync = 1;
		err = 0;
	}
out:
	inode_unlock(inode);

	return err;
}

void fuse_read_args_fill(struct fuse_io_args *ia, struct file *file, loff_t pos,
			 size_t count, int opcode)
{
	struct fuse_file *ff = file->private_data;
	struct fuse_args *args = &ia->ap.args;

	ia->read.in.fh = ff->fh;
	ia->read.in.offset = pos;
	ia->read.in.size = count;
	ia->read.in.flags = file->f_flags;
	args->opcode = opcode;
	args->nodeid = ff->nodeid;
	args->in_numargs = 1;
	args->in_args[0].size = sizeof(ia->read.in);
	args->in_args[0].value = &ia->read.in;
	args->out_argvar = true;
	args->out_numargs = 1;
	args->out_args[0].size = count;
}
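
/*
 * Note: out_argvar above marks the reply as variable-sized, i.e. the
 * server may return anywhere from 0 to 'count' bytes and
 * out_args[0].size is updated to the amount actually received.  That is
 * how callers such as fuse_do_readpage() and fuse_aio_complete_req()
 * detect short reads.
 */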

static void fuse_release_user_pages(struct fuse_args_pages *ap,
				    bool should_dirty)
{
	unsigned int i;

	for (i = 0; i < ap->num_pages; i++) {
		if (should_dirty)
			set_page_dirty_lock(ap->pages[i]);
		put_page(ap->pages[i]);
	}
}

static void fuse_io_release(struct kref *kref)
{
	kfree(container_of(kref, struct fuse_io_priv, refcnt));
}

static ssize_t fuse_get_res_by_io(struct fuse_io_priv *io)
{
	if (io->err)
		return io->err;

	if (io->bytes >= 0 && io->write)
		return -EIO;

	return io->bytes < 0 ? io->size : io->bytes;
}
/*
 * In case of a short read, the caller sets 'pos' to the position of the
 * actual end of the fuse request in the IO request. Otherwise, if
 * bytes_requested == bytes_transferred or rw == WRITE, the caller sets
 * 'pos' to -1.
 *
 * An example:
 * User requested DIO read of 64K. It was split into two 32K fuse requests,
 * both submitted asynchronously. The first of them was ACKed by userspace as
 * fully completed (req->out.args[0].size == 32K) resulting in pos == -1. The
 * second request was ACKed as short, e.g. only 1K was read, resulting in
 * pos == 33K.
 *
 * Thus, when all fuse requests are completed, the minimal non-negative 'pos'
 * will be equal to the length of the longest contiguous fragment of
 * transferred data starting from the beginning of IO request.
 */
static void fuse_aio_complete(struct fuse_io_priv *io, int err, ssize_t pos)
{
	int left;

	spin_lock(&io->lock);
	if (err)
		io->err = io->err ? : err;
	else if (pos >= 0 && (io->bytes < 0 || pos < io->bytes))
		io->bytes = pos;

	left = --io->reqs;
	if (!left && io->blocking)
		complete(io->done);
	spin_unlock(&io->lock);

	if (!left && !io->blocking) {
		ssize_t res = fuse_get_res_by_io(io);

		if (res >= 0) {
			struct inode *inode = file_inode(io->iocb->ki_filp);
			struct fuse_conn *fc = get_fuse_conn(inode);
			struct fuse_inode *fi = get_fuse_inode(inode);

			spin_lock(&fi->lock);
			fi->attr_version = atomic64_inc_return(&fc->attr_version);
			spin_unlock(&fi->lock);
		}

		io->iocb->ki_complete(io->iocb, res, 0);
	}

	kref_put(&io->refcnt, fuse_io_release);
}

static struct fuse_io_args *fuse_io_alloc(struct fuse_io_priv *io,
					  unsigned int npages)
{
	struct fuse_io_args *ia;

	ia = kzalloc(sizeof(*ia), GFP_KERNEL);
	if (ia) {
		ia->io = io;
		ia->ap.pages = fuse_pages_alloc(npages, GFP_KERNEL,
						&ia->ap.descs);
		if (!ia->ap.pages) {
			kfree(ia);
			ia = NULL;
		}
	}
	return ia;
}

static void fuse_io_free(struct fuse_io_args *ia)
{
	kfree(ia->ap.pages);
	kfree(ia);
}

static void fuse_aio_complete_req(struct fuse_mount *fm, struct fuse_args *args,
				  int err)
{
	struct fuse_io_args *ia = container_of(args, typeof(*ia), ap.args);
	struct fuse_io_priv *io = ia->io;
	ssize_t pos = -1;

	fuse_release_user_pages(&ia->ap, io->should_dirty);

	if (err) {
		/* Nothing */
	} else if (io->write) {
		if (ia->write.out.size > ia->write.in.size) {
			err = -EIO;
		} else if (ia->write.in.size != ia->write.out.size) {
			pos = ia->write.in.offset - io->offset +
				ia->write.out.size;
		}
	} else {
		u32 outsize = args->out_args[0].size;

		if (ia->read.in.size != outsize)
			pos = ia->read.in.offset - io->offset + outsize;
	}

	fuse_aio_complete(io, err, pos);
	fuse_io_free(ia);
}

static ssize_t fuse_async_req_send(struct fuse_mount *fm,
				   struct fuse_io_args *ia, size_t num_bytes)
{
	ssize_t err;
	struct fuse_io_priv *io = ia->io;

	spin_lock(&io->lock);
	kref_get(&io->refcnt);
	io->size += num_bytes;
	io->reqs++;
	spin_unlock(&io->lock);

	ia->ap.args.end = fuse_aio_complete_req;
	ia->ap.args.may_block = io->should_dirty;
	err = fuse_simple_background(fm, &ia->ap.args, GFP_KERNEL);
	if (err)
		fuse_aio_complete_req(fm, &ia->ap.args, err);

	return num_bytes;
}
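
/*
 * Note that the function above returns num_bytes even on failure: for
 * async requests the real outcome is reported via the ->end callback
 * (fuse_aio_complete_req()), so an error from queueing is routed
 * through the same completion path rather than returned to the caller.
 */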

static ssize_t fuse_send_read(struct fuse_io_args *ia, loff_t pos, size_t count,
			      fl_owner_t owner)
{
	struct file *file = ia->io->iocb->ki_filp;
	struct fuse_file *ff = file->private_data;
	struct fuse_mount *fm = ff->fm;

	fuse_read_args_fill(ia, file, pos, count, FUSE_READ);
	if (owner != NULL) {
		ia->read.in.read_flags |= FUSE_READ_LOCKOWNER;
		ia->read.in.lock_owner = fuse_lock_owner_id(fm->fc, owner);
	}

	if (ia->io->async)
		return fuse_async_req_send(fm, ia, count);

	return fuse_simple_request(fm, &ia->ap.args);
}

static void fuse_read_update_size(struct inode *inode, loff_t size,
				  u64 attr_ver)
{
	struct fuse_conn *fc = get_fuse_conn(inode);
	struct fuse_inode *fi = get_fuse_inode(inode);

	spin_lock(&fi->lock);
	if (attr_ver == fi->attr_version && size < inode->i_size &&
	    !test_bit(FUSE_I_SIZE_UNSTABLE, &fi->state)) {
		fi->attr_version = atomic64_inc_return(&fc->attr_version);
		i_size_write(inode, size);
	}
	spin_unlock(&fi->lock);
}

static void fuse_short_read(struct inode *inode, u64 attr_ver, size_t num_read,
			    struct fuse_args_pages *ap)
{
	struct fuse_conn *fc = get_fuse_conn(inode);

	if (fc->writeback_cache) {
		/*
		 * A hole in the file: some data after the hole is already
		 * in the page cache but has not reached the client fs yet,
		 * so the hole is not present there. Zero the rest of the
		 * pages locally instead of shrinking the cached size.
		 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 806) int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 807) int start_idx = num_read >> PAGE_SHIFT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 808) size_t off = num_read & (PAGE_SIZE - 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 809)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 810) for (i = start_idx; i < ap->num_pages; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 811) zero_user_segment(ap->pages[i], off, PAGE_SIZE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 812) off = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 813) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 814) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 815) loff_t pos = page_offset(ap->pages[0]) + num_read;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 816) fuse_read_update_size(inode, pos, attr_ver);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 817) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 818) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 819)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 820) static int fuse_do_readpage(struct file *file, struct page *page)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 821) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 822) struct inode *inode = page->mapping->host;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 823) struct fuse_mount *fm = get_fuse_mount(inode);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 824) loff_t pos = page_offset(page);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 825) struct fuse_page_desc desc = { .length = PAGE_SIZE };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 826) struct fuse_io_args ia = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 827) .ap.args.page_zeroing = true,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 828) .ap.args.out_pages = true,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 829) .ap.num_pages = 1,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 830) .ap.pages = &page,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 831) .ap.descs = &desc,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 832) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 833) ssize_t res;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 834) u64 attr_ver;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 835)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 836) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 837) * Page writeback can extend beyond the lifetime of the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 838) * page-cache page, so make sure we read a properly synced
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 839) * page.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 840) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 841) fuse_wait_on_page_writeback(inode, page->index);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 842)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 843) attr_ver = fuse_get_attr_version(fm->fc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 844)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 845) /* Don't overflow end offset */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 846) if (pos + (desc.length - 1) == LLONG_MAX)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 847) desc.length--;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 848)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 849) fuse_read_args_fill(&ia, file, pos, desc.length, FUSE_READ);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 850) res = fuse_simple_request(fm, &ia.ap.args);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 851) if (res < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 852) return res;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 853) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 854)  * A short read means EOF. If the cached file size is larger, truncate it.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 855)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 856) if (res < desc.length)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 857) fuse_short_read(inode, attr_ver, res, &ia.ap);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 858)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 859) SetPageUptodate(page);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 860)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 861) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 862) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 863)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 864) static int fuse_readpage(struct file *file, struct page *page)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 865) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 866) struct inode *inode = page->mapping->host;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 867) int err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 868)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 869) err = -EIO;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 870) if (fuse_is_bad(inode))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 871) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 872)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 873) err = fuse_do_readpage(file, page);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 874) fuse_invalidate_atime(inode);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 875) out:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 876) unlock_page(page);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 877) return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 878) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 879)
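/*
 * Completion handler for a multi-page FUSE_READ. out_args[0].size is the
 * number of bytes the server actually returned; a successful but short
 * reply is treated as EOF. Every page is then marked uptodate or error,
 * unlocked and released. Some pages may already have been truncated away,
 * which is presumably why the code scans for the first page that still
 * has a mapping.
 */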
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 880) static void fuse_readpages_end(struct fuse_mount *fm, struct fuse_args *args,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 881) int err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 882) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 883) int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 884) struct fuse_io_args *ia = container_of(args, typeof(*ia), ap.args);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 885) struct fuse_args_pages *ap = &ia->ap;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 886) size_t count = ia->read.in.size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 887) size_t num_read = args->out_args[0].size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 888) struct address_space *mapping = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 889)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 890) for (i = 0; mapping == NULL && i < ap->num_pages; i++)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 891) mapping = ap->pages[i]->mapping;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 892)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 893) if (mapping) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 894) struct inode *inode = mapping->host;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 895)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 896) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 897)  * A short read means EOF. If the cached file size is larger, truncate it.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 898)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 899) if (!err && num_read < count)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 900) fuse_short_read(inode, ia->read.attr_ver, num_read, ap);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 901)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 902) fuse_invalidate_atime(inode);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 903) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 904)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 905) for (i = 0; i < ap->num_pages; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 906) struct page *page = ap->pages[i];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 907)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 908) if (!err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 909) SetPageUptodate(page);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 910) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 911) SetPageError(page);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 912) unlock_page(page);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 913) put_page(page);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 914) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 915) if (ia->ff)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 916) fuse_file_put(ia->ff, false, false);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 917)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 918) fuse_io_free(ia);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 919) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 920)
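/*
 * Build and send one FUSE_READ covering the contiguous pages collected in
 * ia. If the connection supports async reads, the request is queued in
 * the background and fuse_readpages_end() runs on completion; otherwise
 * the request is sent synchronously and the end handler is called
 * directly. The one-byte trim mirrors fuse_do_readpage(): a request must
 * not end exactly at LLONG_MAX, or the end offset would overflow.
 */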
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 921) static void fuse_send_readpages(struct fuse_io_args *ia, struct file *file)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 922) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 923) struct fuse_file *ff = file->private_data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 924) struct fuse_mount *fm = ff->fm;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 925) struct fuse_args_pages *ap = &ia->ap;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 926) loff_t pos = page_offset(ap->pages[0]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 927) size_t count = ap->num_pages << PAGE_SHIFT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 928) ssize_t res;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 929) int err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 930)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 931) ap->args.out_pages = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 932) ap->args.page_zeroing = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 933) ap->args.page_replace = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 934)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 935) /* Don't overflow end offset */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 936) if (pos + (count - 1) == LLONG_MAX) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 937) count--;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 938) ap->descs[ap->num_pages - 1].length--;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 939) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 940) WARN_ON((loff_t) (pos + count) < 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 941)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 942) fuse_read_args_fill(ia, file, pos, count, FUSE_READ);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 943) ia->read.attr_ver = fuse_get_attr_version(fm->fc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 944) if (fm->fc->async_read) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 945) ia->ff = fuse_file_get(ff);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 946) ap->args.end = fuse_readpages_end;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 947) err = fuse_simple_background(fm, &ap->args, GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 948) if (!err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 949) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 950) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 951) res = fuse_simple_request(fm, &ap->args);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 952) err = res < 0 ? res : 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 953) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 954) fuse_readpages_end(fm, &ap->args, err);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 955) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 956)
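/*
 * Split the readahead window into batches of at most max_pages (also
 * bounded by max_read). Note the nr_pages bookkeeping: at the top of the
 * loop readahead_count() appears to still include the previous batch,
 * which the readahead core only deducts on the next __readahead_batch()
 * call, so the previous batch size is subtracted to get the pages still
 * pending.
 */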
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 957) static void fuse_readahead(struct readahead_control *rac)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 958) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 959) struct inode *inode = rac->mapping->host;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 960) struct fuse_conn *fc = get_fuse_conn(inode);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 961) unsigned int i, max_pages, nr_pages = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 962)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 963) if (fuse_is_bad(inode))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 964) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 965)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 966) max_pages = min_t(unsigned int, fc->max_pages,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 967) fc->max_read / PAGE_SIZE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 968)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 969) for (;;) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 970) struct fuse_io_args *ia;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 971) struct fuse_args_pages *ap;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 972)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 973) nr_pages = readahead_count(rac) - nr_pages;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 974) if (nr_pages > max_pages)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 975) nr_pages = max_pages;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 976) if (nr_pages == 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 977) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 978) ia = fuse_io_alloc(NULL, nr_pages);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 979) if (!ia)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 980) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 981) ap = &ia->ap;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 982) nr_pages = __readahead_batch(rac, ap->pages, nr_pages);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 983) for (i = 0; i < nr_pages; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 984) fuse_wait_on_page_writeback(inode,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 985) readahead_index(rac) + i);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 986) ap->descs[i].length = PAGE_SIZE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 987) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 988) ap->num_pages = nr_pages;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 989) fuse_send_readpages(ia, rac->file);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 990) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 991) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 992)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 993) static ssize_t fuse_cache_read_iter(struct kiocb *iocb, struct iov_iter *to)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 994) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 995) struct inode *inode = iocb->ki_filp->f_mapping->host;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 996) struct fuse_conn *fc = get_fuse_conn(inode);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 997)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 998) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 999) * In auto invalidate mode, always update attributes on read.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1000) * Otherwise, only update if we attempt to read past EOF (to ensure
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1001) * i_size is up to date).
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1002) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1003) if (fc->auto_inval_data ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1004) (iocb->ki_pos + iov_iter_count(to) > i_size_read(inode))) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1005) int err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1006) err = fuse_update_attributes(inode, iocb->ki_filp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1007) if (err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1008) return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1009) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1010)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1011) return generic_file_read_iter(iocb, to);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1012) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1013)
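/*
 * Lay out a FUSE_WRITE request. The wire format, as set up below, is:
 *   in_args[0]:  struct fuse_write_in (fh, offset, size, write_flags, ...),
 *                truncated to FUSE_COMPAT_WRITE_IN_SIZE for protocol
 *                minor versions below 9
 *   in_args[1]:  `count' bytes of payload
 *   out_args[0]: struct fuse_write_out, reporting the bytes accepted
 */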
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1014) static void fuse_write_args_fill(struct fuse_io_args *ia, struct fuse_file *ff,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1015) loff_t pos, size_t count)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1016) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1017) struct fuse_args *args = &ia->ap.args;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1018)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1019) ia->write.in.fh = ff->fh;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1020) ia->write.in.offset = pos;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1021) ia->write.in.size = count;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1022) args->opcode = FUSE_WRITE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1023) args->nodeid = ff->nodeid;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1024) args->in_numargs = 2;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1025) if (ff->fm->fc->minor < 9)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1026) args->in_args[0].size = FUSE_COMPAT_WRITE_IN_SIZE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1027) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1028) args->in_args[0].size = sizeof(ia->write.in);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1029) args->in_args[0].value = &ia->write.in;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1030) args->in_args[1].size = count;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1031) args->out_numargs = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1032) args->out_args[0].size = sizeof(ia->write.out);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1033) args->out_args[0].value = &ia->write.out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1034) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1035)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1036) static unsigned int fuse_write_flags(struct kiocb *iocb)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1037) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1038) unsigned int flags = iocb->ki_filp->f_flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1039)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1040) if (iocb->ki_flags & IOCB_DSYNC)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1041) flags |= O_DSYNC;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1042) if (iocb->ki_flags & IOCB_SYNC)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1043) flags |= O_SYNC;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1044)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1045) return flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1046) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1047)
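/*
 * Send a single FUSE_WRITE. A reply claiming more bytes than were sent is
 * a protocol violation and is turned into -EIO; otherwise the number of
 * bytes the server accepted is returned.
 */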
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1048) static ssize_t fuse_send_write(struct fuse_io_args *ia, loff_t pos,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1049) size_t count, fl_owner_t owner)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1050) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1051) struct kiocb *iocb = ia->io->iocb;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1052) struct file *file = iocb->ki_filp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1053) struct fuse_file *ff = file->private_data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1054) struct fuse_mount *fm = ff->fm;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1055) struct fuse_write_in *inarg = &ia->write.in;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1056) ssize_t err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1057)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1058) fuse_write_args_fill(ia, ff, pos, count);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1059) inarg->flags = fuse_write_flags(iocb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1060) if (owner != NULL) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1061) inarg->write_flags |= FUSE_WRITE_LOCKOWNER;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1062) inarg->lock_owner = fuse_lock_owner_id(fm->fc, owner);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1063) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1064)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1065) if (ia->io->async)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1066) return fuse_async_req_send(fm, ia, count);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1067)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1068) err = fuse_simple_request(fm, &ia->ap.args);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1069) if (!err && ia->write.out.size > count)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1070) err = -EIO;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1071)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1072) return err ?: ia->write.out.size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1073) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1074)
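/*
 * Extend the cached i_size after writing past EOF. Returns true if the
 * size actually grew. The attribute version is bumped either way, so that
 * stale attribute replies still in flight cannot overwrite the new state.
 */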
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1075) bool fuse_write_update_size(struct inode *inode, loff_t pos)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1076) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1077) struct fuse_conn *fc = get_fuse_conn(inode);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1078) struct fuse_inode *fi = get_fuse_inode(inode);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1079) bool ret = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1080)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1081) spin_lock(&fi->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1082) fi->attr_version = atomic64_inc_return(&fc->attr_version);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1083) if (pos > inode->i_size) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1084) i_size_write(inode, pos);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1085) ret = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1086) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1087) spin_unlock(&fi->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1088)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1089) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1090) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1091)
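/*
 * Synchronously write out the page-cache pages packed in ia. The walk
 * over `count' below works out, page by page, how far the server's
 * (possibly short) reply reached: fully consumed pages stay uptodate,
 * while a partially written page loses its uptodate flag so a later read
 * will refetch it. A final page left locked by fuse_fill_write_pages()
 * is unlocked here.
 */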
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1092) static ssize_t fuse_send_write_pages(struct fuse_io_args *ia,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1093) struct kiocb *iocb, struct inode *inode,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1094) loff_t pos, size_t count)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1095) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1096) struct fuse_args_pages *ap = &ia->ap;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1097) struct file *file = iocb->ki_filp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1098) struct fuse_file *ff = file->private_data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1099) struct fuse_mount *fm = ff->fm;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1100) unsigned int offset, i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1101) bool short_write;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1102) int err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1103)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1104) for (i = 0; i < ap->num_pages; i++)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1105) fuse_wait_on_page_writeback(inode, ap->pages[i]->index);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1106)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1107) fuse_write_args_fill(ia, ff, pos, count);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1108) ia->write.in.flags = fuse_write_flags(iocb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1109)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1110) err = fuse_simple_request(fm, &ap->args);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1111) if (!err && ia->write.out.size > count)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1112) err = -EIO;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1113)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1114) short_write = ia->write.out.size < count;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1115) offset = ap->descs[0].offset;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1116) count = ia->write.out.size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1117) for (i = 0; i < ap->num_pages; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1118) struct page *page = ap->pages[i];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1119)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1120) if (err) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1121) ClearPageUptodate(page);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1122) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1123) if (count >= PAGE_SIZE - offset)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1124) count -= PAGE_SIZE - offset;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1125) else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1126) if (short_write)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1127) ClearPageUptodate(page);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1128) count = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1129) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1130) offset = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1131) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1132) if (ia->write.page_locked && (i == ap->num_pages - 1))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1133) unlock_page(page);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1134) put_page(page);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1135) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1136)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1137) return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1138) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1139)
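/*
 * Copy user data into freshly grabbed page-cache pages, building up one
 * write request. The source buffer is faulted in before the atomic copy
 * because taking a page fault while holding the page lock could deadlock
 * (e.g. when the source is an mmap of the same page). A partially filled
 * page that is not uptodate is left locked and ends the batch, so readers
 * cannot observe the half-written page before the data reaches the server.
 */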
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1140) static ssize_t fuse_fill_write_pages(struct fuse_io_args *ia,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1141) struct address_space *mapping,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1142) struct iov_iter *ii, loff_t pos,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1143) unsigned int max_pages)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1144) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1145) struct fuse_args_pages *ap = &ia->ap;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1146) struct fuse_conn *fc = get_fuse_conn(mapping->host);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1147) unsigned offset = pos & (PAGE_SIZE - 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1148) size_t count = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1149) int err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1150)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1151) ap->args.in_pages = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1152) ap->descs[0].offset = offset;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1153)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1154) do {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1155) size_t tmp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1156) struct page *page;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1157) pgoff_t index = pos >> PAGE_SHIFT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1158) size_t bytes = min_t(size_t, PAGE_SIZE - offset,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1159) iov_iter_count(ii));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1160)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1161) bytes = min_t(size_t, bytes, fc->max_write - count);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1162)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1163) again:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1164) err = -EFAULT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1165) if (iov_iter_fault_in_readable(ii, bytes))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1166) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1167)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1168) err = -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1169) page = grab_cache_page_write_begin(mapping, index, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1170) if (!page)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1171) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1172)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1173) if (mapping_writably_mapped(mapping))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1174) flush_dcache_page(page);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1175)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1176) tmp = iov_iter_copy_from_user_atomic(page, ii, offset, bytes);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1177) flush_dcache_page(page);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1178)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1179) iov_iter_advance(ii, tmp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1180) if (!tmp) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1181) unlock_page(page);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1182) put_page(page);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1183) bytes = min(bytes, iov_iter_single_seg_count(ii));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1184) goto again;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1185) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1186)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1187) err = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1188) ap->pages[ap->num_pages] = page;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1189) ap->descs[ap->num_pages].length = tmp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1190) ap->num_pages++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1191)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1192) count += tmp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1193) pos += tmp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1194) offset += tmp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1195) if (offset == PAGE_SIZE)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1196) offset = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1197)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1198) /* If we copied a full page, mark it uptodate */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1199) if (tmp == PAGE_SIZE)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1200) SetPageUptodate(page);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1201)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1202) if (PageUptodate(page)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1203) unlock_page(page);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1204) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1205) ia->write.page_locked = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1206) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1207) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1208) if (!fc->big_writes)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1209) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1210) } while (iov_iter_count(ii) && count < fc->max_write &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1211) ap->num_pages < max_pages && offset == 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1212)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1213) return count > 0 ? count : err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1214) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1215)
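/*
 * Number of page-cache pages spanned by the byte range [pos, pos + len),
 * clamped to max_pages. Worked example with hypothetical values (4 KiB
 * pages): pos = 1000, len = 5000 covers bytes 1000..5999, so
 * ((5999 >> 12) - (1000 >> 12)) + 1 = (1 - 0) + 1 = 2 pages.
 */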
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1216) static inline unsigned int fuse_wr_pages(loff_t pos, size_t len,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1217) unsigned int max_pages)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1218) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1219) return min_t(unsigned int,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1220) ((pos + len - 1) >> PAGE_SHIFT) -
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1221) (pos >> PAGE_SHIFT) + 1,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1222) max_pages);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1223) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1224)
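/*
 * The buffered write loop: fill a batch of pages, send it, and repeat
 * until the iter is drained or an error or short write occurs. While the
 * write may extend the file, FUSE_I_SIZE_UNSTABLE is set so concurrent
 * readers do not trust (or shrink) i_size mid-update; the new size is
 * only published via fuse_write_update_size() once data was accepted.
 */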
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1225) static ssize_t fuse_perform_write(struct kiocb *iocb,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1226) struct address_space *mapping,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1227) struct iov_iter *ii, loff_t pos)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1228) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1229) struct inode *inode = mapping->host;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1230) struct fuse_conn *fc = get_fuse_conn(inode);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1231) struct fuse_inode *fi = get_fuse_inode(inode);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1232) int err = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1233) ssize_t res = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1234)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1235) if (inode->i_size < pos + iov_iter_count(ii))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1236) set_bit(FUSE_I_SIZE_UNSTABLE, &fi->state);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1237)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1238) do {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1239) ssize_t count;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1240) struct fuse_io_args ia = {};
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1241) struct fuse_args_pages *ap = &ia.ap;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1242) unsigned int nr_pages = fuse_wr_pages(pos, iov_iter_count(ii),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1243) fc->max_pages);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1244)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1245) ap->pages = fuse_pages_alloc(nr_pages, GFP_KERNEL, &ap->descs);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1246) if (!ap->pages) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1247) err = -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1248) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1249) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1250)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1251) count = fuse_fill_write_pages(&ia, mapping, ii, pos, nr_pages);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1252) if (count <= 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1253) err = count;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1254) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1255) err = fuse_send_write_pages(&ia, iocb, inode,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1256) pos, count);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1257) if (!err) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1258) size_t num_written = ia.write.out.size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1259)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1260) res += num_written;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1261) pos += num_written;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1262)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1263) /* break out of the loop on short write */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1264) if (num_written != count)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1265) err = -EIO;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1266) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1267) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1268) kfree(ap->pages);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1269) } while (!err && iov_iter_count(ii));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1270)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1271) if (res > 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1272) fuse_write_update_size(inode, pos);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1273)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1274) clear_bit(FUSE_I_SIZE_UNSTABLE, &fi->state);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1275) fuse_invalidate_attr(inode);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1276)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1277) return res > 0 ? res : err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1278) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1279)
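/*
 * Write path for cached I/O. With writeback cache the generic path is
 * used and dirty pages are flushed later. Otherwise writes reach the
 * server synchronously; for O_DIRECT, the remainder that the direct write
 * could not handle is pushed through fuse_perform_write() and the
 * affected range is then flushed and invalidated, mirroring the coherency
 * dance of the generic direct-write fallback.
 */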
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1280) static ssize_t fuse_cache_write_iter(struct kiocb *iocb, struct iov_iter *from)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1281) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1282) struct file *file = iocb->ki_filp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1283) struct address_space *mapping = file->f_mapping;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1284) ssize_t written = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1285) ssize_t written_buffered = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1286) struct inode *inode = mapping->host;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1287) ssize_t err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1288) loff_t endbyte = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1289)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1290) if (get_fuse_conn(inode)->writeback_cache) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1291) /* Update size (EOF optimization) and mode (SUID clearing) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1292) err = fuse_update_attributes(mapping->host, file);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1293) if (err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1294) return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1295)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1296) return generic_file_write_iter(iocb, from);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1297) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1298)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1299) inode_lock(inode);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1300)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1301) /* We can write back this queue in page reclaim */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1302) current->backing_dev_info = inode_to_bdi(inode);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1303)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1304) err = generic_write_checks(iocb, from);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1305) if (err <= 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1306) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1307)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1308) err = file_remove_privs(file);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1309) if (err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1310) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1311)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1312) err = file_update_time(file);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1313) if (err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1314) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1315)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1316) if (iocb->ki_flags & IOCB_DIRECT) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1317) loff_t pos = iocb->ki_pos;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1318) written = generic_file_direct_write(iocb, from);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1319) if (written < 0 || !iov_iter_count(from))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1320) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1321)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1322) pos += written;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1323)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1324) written_buffered = fuse_perform_write(iocb, mapping, from, pos);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1325) if (written_buffered < 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1326) err = written_buffered;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1327) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1328) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1329) endbyte = pos + written_buffered - 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1330)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1331) err = filemap_write_and_wait_range(file->f_mapping, pos,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1332) endbyte);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1333) if (err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1334) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1335)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1336) invalidate_mapping_pages(file->f_mapping,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1337) pos >> PAGE_SHIFT,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1338) endbyte >> PAGE_SHIFT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1339)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1340) written += written_buffered;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1341) iocb->ki_pos = pos + written_buffered;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1342) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1343) written = fuse_perform_write(iocb, mapping, from, iocb->ki_pos);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1344) if (written >= 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1345) iocb->ki_pos += written;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1346) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1347) out:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1348) current->backing_dev_info = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1349) inode_unlock(inode);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1350) if (written > 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1351) written = generic_write_sync(iocb, written);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1352)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1353) return written ? written : err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1354) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1355)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1356) static inline void fuse_page_descs_length_init(struct fuse_page_desc *descs,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1357) unsigned int index,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1358) unsigned int nr_pages)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1359) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1360) int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1361)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1362) for (i = index; i < index + nr_pages; i++)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1363) descs[i].length = PAGE_SIZE - descs[i].offset;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1364) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1365)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1366) static inline unsigned long fuse_get_user_addr(const struct iov_iter *ii)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1367) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1368) return (unsigned long)ii->iov->iov_base + ii->iov_offset;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1369) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1370)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1371) static inline size_t fuse_get_frag_size(const struct iov_iter *ii,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1372) size_t max_size)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1373) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1374) return min(iov_iter_single_seg_count(ii), max_size);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1375) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1376)
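/*
 * Attach the user buffer to the request. For ITER_KVEC the kernel address
 * is passed straight through as the argument value; no pinning is needed.
 * Otherwise user pages are pinned with iov_iter_get_pages(): `start' is
 * the offset into the first page, and the last descriptor's length is
 * trimmed so the descriptors cover exactly the pinned bytes.
 */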
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1377) static int fuse_get_user_pages(struct fuse_args_pages *ap, struct iov_iter *ii,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1378) size_t *nbytesp, int write,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1379) unsigned int max_pages)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1380) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1381) size_t nbytes = 0; /* # bytes already packed in req */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1382) ssize_t ret = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1383)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1384) /* Special case for kernel I/O: can copy directly into the buffer */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1385) if (iov_iter_is_kvec(ii)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1386) unsigned long user_addr = fuse_get_user_addr(ii);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1387) size_t frag_size = fuse_get_frag_size(ii, *nbytesp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1388)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1389) if (write)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1390) ap->args.in_args[1].value = (void *) user_addr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1391) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1392) ap->args.out_args[0].value = (void *) user_addr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1393)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1394) iov_iter_advance(ii, frag_size);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1395) *nbytesp = frag_size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1396) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1397) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1398)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1399) while (nbytes < *nbytesp && ap->num_pages < max_pages) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1400) unsigned npages;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1401) size_t start;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1402) ret = iov_iter_get_pages(ii, &ap->pages[ap->num_pages],
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1403) *nbytesp - nbytes,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1404) max_pages - ap->num_pages,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1405) &start);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1406) if (ret < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1407) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1408)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1409) iov_iter_advance(ii, ret);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1410) nbytes += ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1411)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1412) ret += start;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1413) npages = (ret + PAGE_SIZE - 1) / PAGE_SIZE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1414)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1415) ap->descs[ap->num_pages].offset = start;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1416) fuse_page_descs_length_init(ap->descs, ap->num_pages, npages);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1417)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1418) ap->num_pages += npages;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1419) ap->descs[ap->num_pages - 1].length -=
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1420) (PAGE_SIZE - ret) & (PAGE_SIZE - 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1421) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1422)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1423) ap->args.user_pages = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1424) if (write)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1425) ap->args.in_pages = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1426) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1427) ap->args.out_pages = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1428)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1429) *nbytesp = nbytes;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1430)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1431) return ret < 0 ? ret : 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1432) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1433)
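/*
 * Core of the direct I/O path (also used by CUSE). The iter is chopped
 * into requests of at most max_read/max_write bytes and fc->max_pages
 * pages. Pending writeback over the range is synced first, so direct I/O
 * cannot overtake cached writeback of the same range. For async requests
 * the pinned pages are released by the completion path rather than here,
 * and a short transfer rewinds the iter and terminates the loop.
 */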
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1434) ssize_t fuse_direct_io(struct fuse_io_priv *io, struct iov_iter *iter,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1435) loff_t *ppos, int flags)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1436) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1437) int write = flags & FUSE_DIO_WRITE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1438) int cuse = flags & FUSE_DIO_CUSE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1439) struct file *file = io->iocb->ki_filp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1440) struct inode *inode = file->f_mapping->host;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1441) struct fuse_file *ff = file->private_data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1442) struct fuse_conn *fc = ff->fm->fc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1443) size_t nmax = write ? fc->max_write : fc->max_read;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1444) loff_t pos = *ppos;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1445) size_t count = iov_iter_count(iter);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1446) pgoff_t idx_from = pos >> PAGE_SHIFT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1447) pgoff_t idx_to = (pos + count - 1) >> PAGE_SHIFT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1448) ssize_t res = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1449) int err = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1450) struct fuse_io_args *ia;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1451) unsigned int max_pages;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1452)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1453) max_pages = iov_iter_npages(iter, fc->max_pages);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1454) ia = fuse_io_alloc(io, max_pages);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1455) if (!ia)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1456) return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1457)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1458) ia->io = io;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1459) if (!cuse && fuse_range_is_writeback(inode, idx_from, idx_to)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1460) if (!write)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1461) inode_lock(inode);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1462) fuse_sync_writes(inode);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1463) if (!write)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1464) inode_unlock(inode);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1465) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1466)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1467) io->should_dirty = !write && iter_is_iovec(iter);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1468) while (count) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1469) ssize_t nres;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1470) fl_owner_t owner = current->files;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1471) size_t nbytes = min(count, nmax);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1472)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1473) err = fuse_get_user_pages(&ia->ap, iter, &nbytes, write,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1474) max_pages);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1475) if (err && !nbytes)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1476) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1477)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1478) if (write) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1479) if (!capable(CAP_FSETID))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1480) ia->write.in.write_flags |= FUSE_WRITE_KILL_PRIV;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1481)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1482) nres = fuse_send_write(ia, pos, nbytes, owner);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1483) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1484) nres = fuse_send_read(ia, pos, nbytes, owner);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1485) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1486)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1487) if (!io->async || nres < 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1488) fuse_release_user_pages(&ia->ap, io->should_dirty);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1489) fuse_io_free(ia);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1490) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1491) ia = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1492) if (nres < 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1493) iov_iter_revert(iter, nbytes);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1494) err = nres;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1495) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1496) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1497) WARN_ON(nres > nbytes);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1498)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1499) count -= nres;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1500) res += nres;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1501) pos += nres;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1502) if (nres != nbytes) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1503) iov_iter_revert(iter, nbytes - nres);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1504) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1505) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1506) if (count) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1507) max_pages = iov_iter_npages(iter, fc->max_pages);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1508) ia = fuse_io_alloc(io, max_pages);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1509) if (!ia)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1510) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1511) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1512) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1513) if (ia)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1514) fuse_io_free(ia);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1515) if (res > 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1516) *ppos = pos;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1517)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1518) return res > 0 ? res : err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1519) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1520) EXPORT_SYMBOL_GPL(fuse_direct_io);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1521)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1522) static ssize_t __fuse_direct_read(struct fuse_io_priv *io,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1523) struct iov_iter *iter,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1524) loff_t *ppos)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1525) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1526) ssize_t res;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1527) struct inode *inode = file_inode(io->iocb->ki_filp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1528)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1529) res = fuse_direct_io(io, iter, ppos, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1530)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1531) fuse_invalidate_atime(inode);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1532)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1533) return res;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1534) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1535)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1536) static ssize_t fuse_direct_IO(struct kiocb *iocb, struct iov_iter *iter);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1537)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1538) static ssize_t fuse_direct_read_iter(struct kiocb *iocb, struct iov_iter *to)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1539) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1540) ssize_t res;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1541)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1542) if (!is_sync_kiocb(iocb) && iocb->ki_flags & IOCB_DIRECT) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1543) res = fuse_direct_IO(iocb, to);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1544) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1545) struct fuse_io_priv io = FUSE_IO_PRIV_SYNC(iocb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1546)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1547) res = __fuse_direct_read(&io, to, &iocb->ki_pos);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1548) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1549)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1550) return res;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1551) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1552)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1553) static ssize_t fuse_direct_write_iter(struct kiocb *iocb, struct iov_iter *from)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1554) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1555) struct inode *inode = file_inode(iocb->ki_filp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1556) struct fuse_io_priv io = FUSE_IO_PRIV_SYNC(iocb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1557) ssize_t res;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1558)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1559) /* Don't allow parallel writes to the same file */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1560) inode_lock(inode);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1561) res = generic_write_checks(iocb, from);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1562) if (res > 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1563) if (!is_sync_kiocb(iocb) && iocb->ki_flags & IOCB_DIRECT) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1564) res = fuse_direct_IO(iocb, from);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1565) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1566) res = fuse_direct_io(&io, from, &iocb->ki_pos,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1567) FUSE_DIO_WRITE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1568) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1569) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1570) fuse_invalidate_attr(inode);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1571) if (res > 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1572) fuse_write_update_size(inode, iocb->ki_pos);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1573) inode_unlock(inode);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1574)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1575) return res;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1576) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1577)
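/*
 * Top-level read dispatch, in priority order: DAX, then a passthrough
 * backing file if one is attached, then the cached path unless the file
 * was opened with FOPEN_DIRECT_IO, in which case reads bypass the page
 * cache entirely. fuse_file_write_iter() below mirrors this order.
 */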
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1578) static ssize_t fuse_file_read_iter(struct kiocb *iocb, struct iov_iter *to)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1579) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1580) struct file *file = iocb->ki_filp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1581) struct fuse_file *ff = file->private_data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1582) struct inode *inode = file_inode(file);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1583)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1584) if (fuse_is_bad(inode))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1585) return -EIO;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1586)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1587) if (FUSE_IS_DAX(inode))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1588) return fuse_dax_read_iter(iocb, to);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1589)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1590) if (ff->passthrough.filp)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1591) return fuse_passthrough_read_iter(iocb, to);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1592) else if (!(ff->open_flags & FOPEN_DIRECT_IO))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1593) return fuse_cache_read_iter(iocb, to);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1594) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1595) return fuse_direct_read_iter(iocb, to);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1596) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1597)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1598) static ssize_t fuse_file_write_iter(struct kiocb *iocb, struct iov_iter *from)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1599) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1600) struct file *file = iocb->ki_filp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1601) struct fuse_file *ff = file->private_data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1602) struct inode *inode = file_inode(file);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1603)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1604) if (fuse_is_bad(inode))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1605) return -EIO;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1606)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1607) if (FUSE_IS_DAX(inode))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1608) return fuse_dax_write_iter(iocb, from);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1609)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1610) if (ff->passthrough.filp)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1611) return fuse_passthrough_write_iter(iocb, from);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1612) else if (!(ff->open_flags & FOPEN_DIRECT_IO))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1613) return fuse_cache_write_iter(iocb, from);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1614) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1615) return fuse_direct_write_iter(iocb, from);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1616) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1617)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1618) static void fuse_writepage_free(struct fuse_writepage_args *wpa)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1619) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1620) struct fuse_args_pages *ap = &wpa->ia.ap;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1621) int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1622)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1623) for (i = 0; i < ap->num_pages; i++)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1624) __free_page(ap->pages[i]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1625)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1626) if (wpa->ia.ff)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1627) fuse_file_put(wpa->ia.ff, false, false);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1628)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1629) kfree(ap->pages);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1630) kfree(wpa);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1631) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1632)
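/*
 * Drop the per-page writeback accounting (the temporary pages were
 * charged to WB_WRITEBACK and NR_WRITEBACK_TEMP when the request was set
 * up) and wake anyone sleeping in fuse_wait_on_page_writeback().
 */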
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1633) static void fuse_writepage_finish(struct fuse_mount *fm,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1634) struct fuse_writepage_args *wpa)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1635) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1636) struct fuse_args_pages *ap = &wpa->ia.ap;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1637) struct inode *inode = wpa->inode;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1638) struct fuse_inode *fi = get_fuse_inode(inode);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1639) struct backing_dev_info *bdi = inode_to_bdi(inode);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1640) int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1641)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1642) for (i = 0; i < ap->num_pages; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1643) dec_wb_stat(&bdi->wb, WB_WRITEBACK);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1644) dec_node_page_state(ap->pages[i], NR_WRITEBACK_TEMP);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1645) wb_writeout_inc(&bdi->wb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1646) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1647) wake_up(&fi->page_waitq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1648) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1649)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1650) /* Called under fi->lock, may release and reacquire it */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1651) static void fuse_send_writepage(struct fuse_mount *fm,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1652) struct fuse_writepage_args *wpa, loff_t size)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1653) __releases(fi->lock)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1654) __acquires(fi->lock)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1655) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1656) struct fuse_writepage_args *aux, *next;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1657) struct fuse_inode *fi = get_fuse_inode(wpa->inode);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1658) struct fuse_write_in *inarg = &wpa->ia.write.in;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1659) struct fuse_args *args = &wpa->ia.ap.args;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1660) __u64 data_size = wpa->ia.ap.num_pages * PAGE_SIZE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1661) int err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1662)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1663) fi->writectr++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1664) if (inarg->offset + data_size <= size) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1665) inarg->size = data_size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1666) } else if (inarg->offset < size) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1667) inarg->size = size - inarg->offset;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1668) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1669) /* Got truncated off completely */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1670) goto out_free;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1671) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1672)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1673) args->in_args[1].size = inarg->size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1674) args->force = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1675) args->nocreds = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1676)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1677) err = fuse_simple_background(fm, args, GFP_ATOMIC);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1678) if (err == -ENOMEM) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1679) spin_unlock(&fi->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1680) err = fuse_simple_background(fm, args, GFP_NOFS | __GFP_NOFAIL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1681) spin_lock(&fi->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1682) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1683)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1684) 	/* Fails only on a broken connection */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1685) if (unlikely(err))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1686) goto out_free;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1687)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1688) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1689)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1690) out_free:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1691) fi->writectr--;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1692) rb_erase(&wpa->writepages_entry, &fi->writepages);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1693) fuse_writepage_finish(fm, wpa);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1694) spin_unlock(&fi->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1695)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1696) 	/* After fuse_writepage_finish() the aux request list is private */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1697) for (aux = wpa->next; aux; aux = next) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1698) next = aux->next;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1699) aux->next = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1700) fuse_writepage_free(aux);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1701) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1702)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1703) fuse_writepage_free(wpa);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1704) spin_lock(&fi->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1705) }
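
/*
 * Editorial sketch (not used by the code above): the cropping in
 * fuse_send_writepage() maps a request that raced with truncate onto the
 * number of bytes still below i_size. Modelled in isolation, the helper
 * name is hypothetical:
 */
static inline loff_t fuse_example_crop(loff_t off, loff_t len, loff_t isize)
{
	if (off + len <= isize)
		return len;		/* entirely below EOF: send as is */
	if (off < isize)
		return isize - off;	/* crop the tail that got truncated */
	return 0;			/* truncated off completely: drop */
}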
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1706)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1707) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1708) * If fi->writectr is positive (no truncate or fsync going on) send
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1709) * all queued writepage requests.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1710) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1711)  * Called with fi->lock held
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1712) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1713) void fuse_flush_writepages(struct inode *inode)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1714) __releases(fi->lock)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1715) __acquires(fi->lock)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1716) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1717) struct fuse_mount *fm = get_fuse_mount(inode);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1718) struct fuse_inode *fi = get_fuse_inode(inode);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1719) loff_t crop = i_size_read(inode);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1720) struct fuse_writepage_args *wpa;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1721)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1722) while (fi->writectr >= 0 && !list_empty(&fi->queued_writes)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1723) wpa = list_entry(fi->queued_writes.next,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1724) struct fuse_writepage_args, queue_entry);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1725) list_del_init(&wpa->queue_entry);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1726) fuse_send_writepage(fm, wpa, crop);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1727) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1728) }
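
/*
 * Editorial note: a negative fi->writectr means some task sits in a
 * fuse_set_nowrite()..fuse_release_nowrite() section (truncate or fsync),
 * so queued requests stay parked until the counter is restored. A minimal
 * sketch of the gate checked by the loop above (helper name hypothetical):
 */
static inline bool fuse_example_writes_allowed(const struct fuse_inode *fi)
{
	/* >= 0: no nowrite section active, queued writepages may be sent */
	return fi->writectr >= 0;
}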
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1729)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1730) static struct fuse_writepage_args *fuse_insert_writeback(struct rb_root *root,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1731) struct fuse_writepage_args *wpa)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1732) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1733) pgoff_t idx_from = wpa->ia.write.in.offset >> PAGE_SHIFT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1734) pgoff_t idx_to = idx_from + wpa->ia.ap.num_pages - 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1735) struct rb_node **p = &root->rb_node;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1736) struct rb_node *parent = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1737)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1738) WARN_ON(!wpa->ia.ap.num_pages);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1739) while (*p) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1740) struct fuse_writepage_args *curr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1741) pgoff_t curr_index;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1742)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1743) parent = *p;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1744) curr = rb_entry(parent, struct fuse_writepage_args,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1745) writepages_entry);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1746) WARN_ON(curr->inode != wpa->inode);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1747) curr_index = curr->ia.write.in.offset >> PAGE_SHIFT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1748)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1749) if (idx_from >= curr_index + curr->ia.ap.num_pages)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1750) p = &(*p)->rb_right;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1751) else if (idx_to < curr_index)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1752) p = &(*p)->rb_left;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1753) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1754) return curr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1755) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1756)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1757) rb_link_node(&wpa->writepages_entry, parent, p);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1758) rb_insert_color(&wpa->writepages_entry, root);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1759) return NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1760) }
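
/*
 * Editorial sketch: the descent above is a closed-interval search. Two page
 * ranges [a_from, a_to] and [b_from, b_to] overlap exactly when neither
 * lies wholly to one side of the other, i.e. the negation of the two branch
 * conditions in the loop (helper name hypothetical):
 */
static inline bool fuse_example_ranges_overlap(pgoff_t a_from, pgoff_t a_to,
					       pgoff_t b_from, pgoff_t b_to)
{
	return a_from <= b_to && b_from <= a_to;
}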
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1761)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1762) static void tree_insert(struct rb_root *root, struct fuse_writepage_args *wpa)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1763) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1764) WARN_ON(fuse_insert_writeback(root, wpa));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1765) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1766)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1767) static void fuse_writepage_end(struct fuse_mount *fm, struct fuse_args *args,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1768) int error)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1769) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1770) struct fuse_writepage_args *wpa =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1771) container_of(args, typeof(*wpa), ia.ap.args);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1772) struct inode *inode = wpa->inode;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1773) struct fuse_inode *fi = get_fuse_inode(inode);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1774) struct fuse_conn *fc = get_fuse_conn(inode);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1775)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1776) mapping_set_error(inode->i_mapping, error);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1777) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1778) 	 * A writeback has finished and might have updated mtime/ctime on the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1779) 	 * server, making the local mtime/ctime stale. Hence invalidate attrs.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1780) 	 * Do this only if writeback_cache is not enabled: if it is, we trust
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1781) 	 * the local ctime/mtime.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1782) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1783) if (!fc->writeback_cache)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1784) fuse_invalidate_attr(inode);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1785) spin_lock(&fi->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1786) rb_erase(&wpa->writepages_entry, &fi->writepages);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1787) while (wpa->next) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1788) struct fuse_mount *fm = get_fuse_mount(inode);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1789) struct fuse_write_in *inarg = &wpa->ia.write.in;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1790) struct fuse_writepage_args *next = wpa->next;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1791)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1792) wpa->next = next->next;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1793) next->next = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1794) next->ia.ff = fuse_file_get(wpa->ia.ff);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1795) tree_insert(&fi->writepages, next);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1796)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1797) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1798) * Skip fuse_flush_writepages() to make it easy to crop requests
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1799) * based on primary request size.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1800) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1801) * 1st case (trivial): there are no concurrent activities using
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1802) 		 * fuse_set/release_nowrite. Then we're on the safe side because
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1803) * fuse_flush_writepages() would call fuse_send_writepage()
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1804) * anyway.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1805) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1806) * 2nd case: someone called fuse_set_nowrite and it is waiting
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1807) * now for completion of all in-flight requests. This happens
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1808) * rarely and no more than once per page, so this should be
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1809) * okay.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1810) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1811) * 3rd case: someone (e.g. fuse_do_setattr()) is in the middle
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1812) * of fuse_set_nowrite..fuse_release_nowrite section. The fact
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1813) * that fuse_set_nowrite returned implies that all in-flight
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1814) * requests were completed along with all of their secondary
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1815) * requests. Further primary requests are blocked by negative
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1816) * writectr. Hence there cannot be any in-flight requests and
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1817) * no invocations of fuse_writepage_end() while we're in
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1818) * fuse_set_nowrite..fuse_release_nowrite section.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1819) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1820) fuse_send_writepage(fm, next, inarg->offset + inarg->size);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1821) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1822) fi->writectr--;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1823) fuse_writepage_finish(fm, wpa);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1824) spin_unlock(&fi->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1825) fuse_writepage_free(wpa);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1826) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1827)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1828) static struct fuse_file *__fuse_write_file_get(struct fuse_conn *fc,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1829) struct fuse_inode *fi)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1830) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1831) struct fuse_file *ff = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1832)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1833) spin_lock(&fi->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1834) if (!list_empty(&fi->write_files)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1835) ff = list_entry(fi->write_files.next, struct fuse_file,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1836) write_entry);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1837) fuse_file_get(ff);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1838) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1839) spin_unlock(&fi->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1840)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1841) return ff;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1842) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1843)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1844) static struct fuse_file *fuse_write_file_get(struct fuse_conn *fc,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1845) struct fuse_inode *fi)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1846) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1847) struct fuse_file *ff = __fuse_write_file_get(fc, fi);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1848) WARN_ON(!ff);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1849) return ff;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1850) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1851)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1852) int fuse_write_inode(struct inode *inode, struct writeback_control *wbc)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1853) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1854) struct fuse_conn *fc = get_fuse_conn(inode);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1855) struct fuse_inode *fi = get_fuse_inode(inode);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1856) struct fuse_file *ff;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1857) int err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1858)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1859) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1860) * Inode is always written before the last reference is dropped and
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1861) * hence this should not be reached from reclaim.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1862) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1863) * Writing back the inode from reclaim can deadlock if the request
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1864) * processing itself needs an allocation. Allocations triggering
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1865) * reclaim while serving a request can't be prevented, because it can
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1866) * involve any number of unrelated userspace processes.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1867) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1868) WARN_ON(wbc->for_reclaim);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1869)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1870) ff = __fuse_write_file_get(fc, fi);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1871) err = fuse_flush_times(inode, ff);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1872) if (ff)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1873) fuse_file_put(ff, false, false);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1874)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1875) return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1876) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1877)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1878) static struct fuse_writepage_args *fuse_writepage_args_alloc(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1879) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1880) struct fuse_writepage_args *wpa;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1881) struct fuse_args_pages *ap;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1882)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1883) wpa = kzalloc(sizeof(*wpa), GFP_NOFS);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1884) if (wpa) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1885) ap = &wpa->ia.ap;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1886) ap->num_pages = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1887) ap->pages = fuse_pages_alloc(1, GFP_NOFS, &ap->descs);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1888) if (!ap->pages) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1889) kfree(wpa);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1890) wpa = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1891) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1892) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1893) return wpa;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1895) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1896)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1897) static int fuse_writepage_locked(struct page *page)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1898) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1899) struct address_space *mapping = page->mapping;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1900) struct inode *inode = mapping->host;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1901) struct fuse_conn *fc = get_fuse_conn(inode);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1902) struct fuse_inode *fi = get_fuse_inode(inode);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1903) struct fuse_writepage_args *wpa;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1904) struct fuse_args_pages *ap;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1905) struct page *tmp_page;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1906) int error = -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1907)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1908) set_page_writeback(page);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1909)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1910) wpa = fuse_writepage_args_alloc();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1911) if (!wpa)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1912) goto err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1913) ap = &wpa->ia.ap;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1914)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1915) tmp_page = alloc_page(GFP_NOFS | __GFP_HIGHMEM);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1916) if (!tmp_page)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1917) goto err_free;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1918)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1919) error = -EIO;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1920) wpa->ia.ff = fuse_write_file_get(fc, fi);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1921) if (!wpa->ia.ff)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1922) goto err_nofile;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1923)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1924) fuse_write_args_fill(&wpa->ia, wpa->ia.ff, page_offset(page), 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1925)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1926) copy_highpage(tmp_page, page);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1927) wpa->ia.write.in.write_flags |= FUSE_WRITE_CACHE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1928) wpa->next = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1929) ap->args.in_pages = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1930) ap->num_pages = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1931) ap->pages[0] = tmp_page;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1932) ap->descs[0].offset = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1933) ap->descs[0].length = PAGE_SIZE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1934) ap->args.end = fuse_writepage_end;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1935) wpa->inode = inode;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1936)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1937) inc_wb_stat(&inode_to_bdi(inode)->wb, WB_WRITEBACK);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1938) inc_node_page_state(tmp_page, NR_WRITEBACK_TEMP);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1939)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1940) spin_lock(&fi->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1941) tree_insert(&fi->writepages, wpa);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1942) list_add_tail(&wpa->queue_entry, &fi->queued_writes);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1943) fuse_flush_writepages(inode);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1944) spin_unlock(&fi->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1945)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1946) end_page_writeback(page);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1947)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1948) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1949)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1950) err_nofile:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1951) __free_page(tmp_page);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1952) err_free:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1953) kfree(wpa);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1954) err:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1955) mapping_set_error(page->mapping, error);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1956) end_page_writeback(page);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1957) return error;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1958) }
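
/*
 * Editorial note on the function above: end_page_writeback() runs as soon
 * as the request is queued because the data has already been snapshotted
 * into tmp_page; from the VM's point of view the page under writeback is
 * the temporary one (accounted as NR_WRITEBACK_TEMP), not the original.
 */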
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1959)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1960) static int fuse_writepage(struct page *page, struct writeback_control *wbc)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1961) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1962) int err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1963)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1964) if (fuse_page_is_writeback(page->mapping->host, page->index)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1965) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1966) * ->writepages() should be called for sync() and friends. We
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1967) 		 * should only get here on direct reclaim, and then we are
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1968) 		 * allowed to skip a page which is already in flight.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1969) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1970) WARN_ON(wbc->sync_mode == WB_SYNC_ALL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1971)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1972) redirty_page_for_writepage(wbc, page);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1973) unlock_page(page);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1974) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1975) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1976)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1977) err = fuse_writepage_locked(page);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1978) unlock_page(page);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1979)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1980) return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1981) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1982)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1983) struct fuse_fill_wb_data {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1984) struct fuse_writepage_args *wpa;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1985) struct fuse_file *ff;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1986) struct inode *inode;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1987) struct page **orig_pages;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1988) unsigned int max_pages;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1989) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1990)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1991) static bool fuse_pages_realloc(struct fuse_fill_wb_data *data)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1992) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1993) struct fuse_args_pages *ap = &data->wpa->ia.ap;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1994) struct fuse_conn *fc = get_fuse_conn(data->inode);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1995) struct page **pages;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1996) struct fuse_page_desc *descs;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1997) unsigned int npages = min_t(unsigned int,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1998) max_t(unsigned int, data->max_pages * 2,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1999) FUSE_DEFAULT_MAX_PAGES_PER_REQ),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2000) fc->max_pages);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2001) WARN_ON(npages <= data->max_pages);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2002)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2003) pages = fuse_pages_alloc(npages, GFP_NOFS, &descs);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2004) if (!pages)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2005) return false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2006)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2007) memcpy(pages, ap->pages, sizeof(struct page *) * ap->num_pages);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2008) memcpy(descs, ap->descs, sizeof(struct fuse_page_desc) * ap->num_pages);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2009) kfree(ap->pages);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2010) ap->pages = pages;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2011) ap->descs = descs;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2012) data->max_pages = npages;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2013)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2014) return true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2015) }
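
/*
 * Editorial example: the array above grows geometrically (doubling, but to
 * at least FUSE_DEFAULT_MAX_PAGES_PER_REQ) and is clamped to fc->max_pages.
 * Assuming the usual default of 32 and fc->max_pages == 256, a request that
 * starts at one page grows 1 -> 32 -> 64 -> 128 -> 256.
 */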
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2016)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2017) static void fuse_writepages_send(struct fuse_fill_wb_data *data)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2018) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2019) struct fuse_writepage_args *wpa = data->wpa;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2020) struct inode *inode = data->inode;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2021) struct fuse_inode *fi = get_fuse_inode(inode);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2022) int num_pages = wpa->ia.ap.num_pages;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2023) int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2024)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2025) wpa->ia.ff = fuse_file_get(data->ff);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2026) spin_lock(&fi->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2027) list_add_tail(&wpa->queue_entry, &fi->queued_writes);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2028) fuse_flush_writepages(inode);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2029) spin_unlock(&fi->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2030)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2031) for (i = 0; i < num_pages; i++)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2032) end_page_writeback(data->orig_pages[i]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2033) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2034)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2035) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2036)  * Check under fi->lock whether the page is under writeback, and insert it onto
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2037)  * the rb_tree if not. Otherwise iterate the auxiliary write requests to see
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2038)  * whether one has already been added for a page at this offset. If not, insert
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2039)  * this new request onto the auxiliary list; otherwise reuse the existing one by
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2040)  * swapping in the new temp page for the old one.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2041) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2042) static bool fuse_writepage_add(struct fuse_writepage_args *new_wpa,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2043) struct page *page)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2044) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2045) struct fuse_inode *fi = get_fuse_inode(new_wpa->inode);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2046) struct fuse_writepage_args *tmp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2047) struct fuse_writepage_args *old_wpa;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2048) struct fuse_args_pages *new_ap = &new_wpa->ia.ap;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2049)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2050) WARN_ON(new_ap->num_pages != 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2051) new_ap->num_pages = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2052)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2053) spin_lock(&fi->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2054) old_wpa = fuse_insert_writeback(&fi->writepages, new_wpa);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2055) if (!old_wpa) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2056) spin_unlock(&fi->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2057) return true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2058) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2059)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2060) for (tmp = old_wpa->next; tmp; tmp = tmp->next) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2061) pgoff_t curr_index;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2062)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2063) WARN_ON(tmp->inode != new_wpa->inode);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2064) curr_index = tmp->ia.write.in.offset >> PAGE_SHIFT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2065) if (curr_index == page->index) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2066) WARN_ON(tmp->ia.ap.num_pages != 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2067) swap(tmp->ia.ap.pages[0], new_ap->pages[0]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2068) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2069) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2070) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2071)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2072) if (!tmp) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2073) new_wpa->next = old_wpa->next;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2074) old_wpa->next = new_wpa;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2075) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2076)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2077) spin_unlock(&fi->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2078)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2079) if (tmp) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2080) struct backing_dev_info *bdi = inode_to_bdi(new_wpa->inode);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2081)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2082) dec_wb_stat(&bdi->wb, WB_WRITEBACK);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2083) dec_node_page_state(new_ap->pages[0], NR_WRITEBACK_TEMP);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2084) wb_writeout_inc(&bdi->wb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2085) fuse_writepage_free(new_wpa);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2086) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2087)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2088) return false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2089) }
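
/*
 * Editorial summary of the outcomes above: no overlap means the new request
 * was inserted into the rb-tree as a primary request (returns true). On
 * overlap, either an auxiliary request for the same page index already
 * exists and only its temp page is replaced by the newer copy (the new
 * request is freed), or no such page exists and the new request is chained
 * onto the primary's auxiliary list; both overlap cases return false.
 */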
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2090)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2091) static bool fuse_writepage_need_send(struct fuse_conn *fc, struct page *page,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2092) struct fuse_args_pages *ap,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2093) struct fuse_fill_wb_data *data)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2094) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2095) WARN_ON(!ap->num_pages);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2096)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2097) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2098) 	 * Being under writeback is unlikely but possible. For example, a
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2099) 	 * direct read to an mmapped fuse file will set the page dirty twice:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2100) 	 * once when the pages are faulted with get_user_pages(), and then
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2101) 	 * again after the read has completed.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2102) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2103) if (fuse_page_is_writeback(data->inode, page->index))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2104) return true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2105)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2106) /* Reached max pages */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2107) if (ap->num_pages == fc->max_pages)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2108) return true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2109)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2110) /* Reached max write bytes */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2111) if ((ap->num_pages + 1) * PAGE_SIZE > fc->max_write)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2112) return true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2113)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2114) /* Discontinuity */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2115) if (data->orig_pages[ap->num_pages - 1]->index + 1 != page->index)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2116) return true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2117)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2118) /* Need to grow the pages array? If so, did the expansion fail? */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2119) if (ap->num_pages == data->max_pages && !fuse_pages_realloc(data))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2120) return true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2121)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2122) return false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2123) }
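
/*
 * Editorial example: with 4K pages and max_write == 128K, the byte check
 * above caps a batch at 32 pages even if fc->max_pages allows more; a gap
 * in the page run likewise forces the batch out, since a single WRITE
 * request describes one contiguous file range.
 */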
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2124)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2125) static int fuse_writepages_fill(struct page *page,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2126) struct writeback_control *wbc, void *_data)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2127) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2128) struct fuse_fill_wb_data *data = _data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2129) struct fuse_writepage_args *wpa = data->wpa;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2130) struct fuse_args_pages *ap = &wpa->ia.ap;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2131) struct inode *inode = data->inode;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2132) struct fuse_inode *fi = get_fuse_inode(inode);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2133) struct fuse_conn *fc = get_fuse_conn(inode);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2134) struct page *tmp_page;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2135) int err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2136)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2137) if (!data->ff) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2138) err = -EIO;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2139) data->ff = fuse_write_file_get(fc, fi);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2140) if (!data->ff)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2141) goto out_unlock;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2142) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2143)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2144) if (wpa && fuse_writepage_need_send(fc, page, ap, data)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2145) fuse_writepages_send(data);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2146) data->wpa = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2147) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2148)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2149) err = -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2150) tmp_page = alloc_page(GFP_NOFS | __GFP_HIGHMEM);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2151) if (!tmp_page)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2152) goto out_unlock;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2153)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2154) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2155) * The page must not be redirtied until the writeout is completed
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2156) * (i.e. userspace has sent a reply to the write request). Otherwise
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2157) * there could be more than one temporary page instance for each real
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2158) * page.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2159) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2160) * This is ensured by holding the page lock in page_mkwrite() while
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2161) * checking fuse_page_is_writeback(). We already hold the page lock
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2162) * since clear_page_dirty_for_io() and keep it held until we add the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2163) * request to the fi->writepages list and increment ap->num_pages.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2164) * After this fuse_page_is_writeback() will indicate that the page is
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2165) * under writeback, so we can release the page lock.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2166) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2167) if (data->wpa == NULL) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2168) err = -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2169) wpa = fuse_writepage_args_alloc();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2170) if (!wpa) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2171) __free_page(tmp_page);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2172) goto out_unlock;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2173) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2174) data->max_pages = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2175)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2176) ap = &wpa->ia.ap;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2177) fuse_write_args_fill(&wpa->ia, data->ff, page_offset(page), 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2178) wpa->ia.write.in.write_flags |= FUSE_WRITE_CACHE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2179) wpa->next = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2180) ap->args.in_pages = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2181) ap->args.end = fuse_writepage_end;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2182) ap->num_pages = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2183) wpa->inode = inode;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2184) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2185) set_page_writeback(page);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2186)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2187) copy_highpage(tmp_page, page);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2188) ap->pages[ap->num_pages] = tmp_page;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2189) ap->descs[ap->num_pages].offset = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2190) ap->descs[ap->num_pages].length = PAGE_SIZE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2191) data->orig_pages[ap->num_pages] = page;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2192)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2193) inc_wb_stat(&inode_to_bdi(inode)->wb, WB_WRITEBACK);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2194) inc_node_page_state(tmp_page, NR_WRITEBACK_TEMP);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2195)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2196) err = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2197) if (data->wpa) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2198) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2199) * Protected by fi->lock against concurrent access by
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2200) * fuse_page_is_writeback().
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2201) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2202) spin_lock(&fi->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2203) ap->num_pages++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2204) spin_unlock(&fi->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2205) } else if (fuse_writepage_add(wpa, page)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2206) data->wpa = wpa;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2207) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2208) end_page_writeback(page);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2209) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2210) out_unlock:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2211) unlock_page(page);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2212)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2213) return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2214) }
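
/*
 * Editorial sketch of the temp-page lifecycle built up above:
 *
 *	clear_page_dirty_for_io(page)		write_cache_pages(), caller
 *	  copy_highpage(tmp_page, page)		snapshot the data
 *	  queue request, update rb-tree		page now "under fuse writeback"
 *	  end_page_writeback(page)		original page is reusable
 *	  ... reply from userspace ...
 *	  fuse_writepage_end()			drops the rb-tree entry
 *	    fuse_writepage_free()		__free_page(tmp_page)
 *
 * Snapshotting is what lets page writeback complete without trusting the
 * userspace server to reply promptly.
 */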
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2215)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2216) static int fuse_writepages(struct address_space *mapping,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2217) struct writeback_control *wbc)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2218) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2219) struct inode *inode = mapping->host;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2220) struct fuse_conn *fc = get_fuse_conn(inode);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2221) struct fuse_fill_wb_data data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2222) int err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2223)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2224) err = -EIO;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2225) if (fuse_is_bad(inode))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2226) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2227)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2228) data.inode = inode;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2229) data.wpa = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2230) data.ff = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2231)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2232) err = -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2233) data.orig_pages = kcalloc(fc->max_pages,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2234) sizeof(struct page *),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2235) GFP_NOFS);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2236) if (!data.orig_pages)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2237) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2238)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2239) err = write_cache_pages(mapping, wbc, fuse_writepages_fill, &data);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2240) if (data.wpa) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2241) WARN_ON(!data.wpa->ia.ap.num_pages);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2242) fuse_writepages_send(&data);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2243) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2244) if (data.ff)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2245) fuse_file_put(data.ff, false, false);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2246)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2247) kfree(data.orig_pages);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2248) out:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2249) return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2250) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2251)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2252) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2253)  * It would be worthwhile to reserve space on disk for the write, but how
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2254)  * to implement that without killing performance needs more thought.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2255) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2256) static int fuse_write_begin(struct file *file, struct address_space *mapping,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2257) loff_t pos, unsigned len, unsigned flags,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2258) struct page **pagep, void **fsdata)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2259) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2260) pgoff_t index = pos >> PAGE_SHIFT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2261) struct fuse_conn *fc = get_fuse_conn(file_inode(file));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2262) struct page *page;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2263) loff_t fsize;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2264) int err = -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2265)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2266) WARN_ON(!fc->writeback_cache);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2267)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2268) page = grab_cache_page_write_begin(mapping, index, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2269) if (!page)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2270) goto error;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2271)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2272) fuse_wait_on_page_writeback(mapping->host, page->index);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2273)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2274) if (PageUptodate(page) || len == PAGE_SIZE)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2275) goto success;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2276) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2277) 	 * Check if the start of this page comes after the end of the file, in
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2278) 	 * which case the readpage can be optimized away.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2279) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2280) fsize = i_size_read(mapping->host);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2281) if (fsize <= (pos & PAGE_MASK)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2282) size_t off = pos & ~PAGE_MASK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2283) if (off)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2284) zero_user_segment(page, 0, off);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2285) goto success;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2286) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2287) err = fuse_do_readpage(file, page);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2288) if (err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2289) goto cleanup;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2290) success:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2291) *pagep = page;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2292) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2293)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2294) cleanup:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2295) unlock_page(page);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2296) put_page(page);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2297) error:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2298) return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2299) }
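
/*
 * Editorial summary of the fast paths above: a page that is already
 * uptodate, or that will be overwritten in full (len == PAGE_SIZE), needs
 * no read-in; a page starting at or beyond EOF holds no old data, so only
 * the head before the write offset needs zeroing; everything else pays for
 * a synchronous fuse_do_readpage().
 */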
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2300)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2301) static int fuse_write_end(struct file *file, struct address_space *mapping,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2302) loff_t pos, unsigned len, unsigned copied,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2303) struct page *page, void *fsdata)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2304) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2305) struct inode *inode = page->mapping->host;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2306)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2307) /* Haven't copied anything? Skip zeroing, size extending, dirtying. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2308) if (!copied)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2309) goto unlock;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2310)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2311) if (!PageUptodate(page)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2312) /* Zero any unwritten bytes at the end of the page */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2313) size_t endoff = (pos + copied) & ~PAGE_MASK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2314) if (endoff)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2315) zero_user_segment(page, endoff, PAGE_SIZE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2316) SetPageUptodate(page);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2317) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2318)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2319) fuse_write_update_size(inode, pos + copied);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2320) set_page_dirty(page);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2321)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2322) unlock:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2323) unlock_page(page);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2324) put_page(page);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2325)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2326) return copied;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2327) }
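
/*
 * Editorial example for the tail zeroing above: with PAGE_SIZE == 4096,
 * pos == 8192 and copied == 100, endoff is (8292 & ~PAGE_MASK) == 100, so
 * bytes [100, 4096) are zeroed before the page is marked uptodate.
 */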
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2328)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2329) static int fuse_launder_page(struct page *page)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2330) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2331) int err = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2332) if (clear_page_dirty_for_io(page)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2333) struct inode *inode = page->mapping->host;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2334) err = fuse_writepage_locked(page);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2335) if (!err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2336) fuse_wait_on_page_writeback(inode, page->index);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2337) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2338) return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2339) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2340)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2341) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2342) * Write back dirty pages now, because there may not be any suitable
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2343)  * open files later.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2344) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2345) static void fuse_vma_close(struct vm_area_struct *vma)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2346) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2347) filemap_write_and_wait(vma->vm_file->f_mapping);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2348) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2349)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2350) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2351) * Wait for writeback against this page to complete before allowing it
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2352) * to be marked dirty again, and hence written back again, possibly
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2353) * before the previous writepage completed.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2354) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2355) * Block here, instead of in ->writepage(), so that the userspace fs
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2356) * can only block processes actually operating on the filesystem.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2357) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2358)  * Otherwise an unprivileged userspace fs would be able to block
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2359) * unrelated:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2360) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2361) * - page migration
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2362) * - sync(2)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2363) * - try_to_free_pages() with order > PAGE_ALLOC_COSTLY_ORDER
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2364) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2365) static vm_fault_t fuse_page_mkwrite(struct vm_fault *vmf)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2366) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2367) struct page *page = vmf->page;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2368) struct inode *inode = file_inode(vmf->vma->vm_file);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2369)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2370) file_update_time(vmf->vma->vm_file);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2371) lock_page(page);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2372) if (page->mapping != inode->i_mapping) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2373) unlock_page(page);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2374) return VM_FAULT_NOPAGE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2375) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2376)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2377) fuse_wait_on_page_writeback(inode, page->index);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2378) return VM_FAULT_LOCKED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2379) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2380)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2381) static const struct vm_operations_struct fuse_file_vm_ops = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2382) .close = fuse_vma_close,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2383) .fault = filemap_fault,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2384) .map_pages = filemap_map_pages,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2385) .page_mkwrite = fuse_page_mkwrite,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2386) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2387)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2388) static int fuse_file_mmap(struct file *file, struct vm_area_struct *vma)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2389) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2390) struct fuse_file *ff = file->private_data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2391)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2392) /* DAX mmap is superior to direct_io mmap */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2393) if (FUSE_IS_DAX(file_inode(file)))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2394) return fuse_dax_mmap(file, vma);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2395)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2396) if (ff->passthrough.filp)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2397) return fuse_passthrough_mmap(file, vma);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2398)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2399) if (ff->open_flags & FOPEN_DIRECT_IO) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2400) /* Can't provide the coherency needed for MAP_SHARED */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2401) if (vma->vm_flags & VM_MAYSHARE)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2402) return -ENODEV;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2403)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2404) invalidate_inode_pages2(file->f_mapping);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2405)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2406) return generic_file_mmap(file, vma);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2407) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2408)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2409) if ((vma->vm_flags & VM_SHARED) && (vma->vm_flags & VM_MAYWRITE))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2410) fuse_link_write_file(file);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2411)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2412) file_accessed(file);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2413) vma->vm_ops = &fuse_file_vm_ops;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2414) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2415) }
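
/*
 * Editorial note: FOPEN_DIRECT_IO normally bypasses the page cache, so any
 * cached pages may be stale by mmap time; they are dropped above before
 * falling back to generic_file_mmap() for the private-mapping case.
 */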
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2416)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2417) static int convert_fuse_file_lock(struct fuse_conn *fc,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2418) const struct fuse_file_lock *ffl,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2419) struct file_lock *fl)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2420) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2421) switch (ffl->type) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2422) case F_UNLCK:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2423) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2424)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2425) case F_RDLCK:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2426) case F_WRLCK:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2427) if (ffl->start > OFFSET_MAX || ffl->end > OFFSET_MAX ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2428) ffl->end < ffl->start)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2429) return -EIO;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2430)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2431) fl->fl_start = ffl->start;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2432) fl->fl_end = ffl->end;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2433)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2434) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2435) * Convert pid into init's pid namespace. The locks API will
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2436) * translate it into the caller's pid namespace.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2437) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2438) rcu_read_lock();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2439) fl->fl_pid = pid_nr_ns(find_pid_ns(ffl->pid, fc->pid_ns), &init_pid_ns);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2440) rcu_read_unlock();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2441) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2442)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2443) default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2444) return -EIO;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2445) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2446) fl->fl_type = ffl->type;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2447) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2448) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2449)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2450) static void fuse_lk_fill(struct fuse_args *args, struct file *file,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2451) const struct file_lock *fl, int opcode, pid_t pid,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2452) int flock, struct fuse_lk_in *inarg)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2453) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2454) struct inode *inode = file_inode(file);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2455) struct fuse_conn *fc = get_fuse_conn(inode);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2456) struct fuse_file *ff = file->private_data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2457)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2458) memset(inarg, 0, sizeof(*inarg));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2459) inarg->fh = ff->fh;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2460) inarg->owner = fuse_lock_owner_id(fc, fl->fl_owner);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2461) inarg->lk.start = fl->fl_start;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2462) inarg->lk.end = fl->fl_end;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2463) inarg->lk.type = fl->fl_type;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2464) inarg->lk.pid = pid;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2465) if (flock)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2466) inarg->lk_flags |= FUSE_LK_FLOCK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2467) args->opcode = opcode;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2468) args->nodeid = get_node_id(inode);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2469) args->in_numargs = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2470) args->in_args[0].size = sizeof(*inarg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2471) args->in_args[0].value = inarg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2472) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2473)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2474) static int fuse_getlk(struct file *file, struct file_lock *fl)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2475) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2476) struct inode *inode = file_inode(file);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2477) struct fuse_mount *fm = get_fuse_mount(inode);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2478) FUSE_ARGS(args);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2479) struct fuse_lk_in inarg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2480) struct fuse_lk_out outarg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2481) int err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2482)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2483) fuse_lk_fill(&args, file, fl, FUSE_GETLK, 0, 0, &inarg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2484) args.out_numargs = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2485) args.out_args[0].size = sizeof(outarg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2486) args.out_args[0].value = &outarg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2487) err = fuse_simple_request(fm, &args);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2488) if (!err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2489) err = convert_fuse_file_lock(fm->fc, &outarg.lk, fl);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2490)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2491) return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2492) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2493)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2494) static int fuse_setlk(struct file *file, struct file_lock *fl, int flock)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2495) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2496) struct inode *inode = file_inode(file);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2497) struct fuse_mount *fm = get_fuse_mount(inode);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2498) FUSE_ARGS(args);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2499) struct fuse_lk_in inarg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2500) int opcode = (fl->fl_flags & FL_SLEEP) ? FUSE_SETLKW : FUSE_SETLK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2501) struct pid *pid = fl->fl_type != F_UNLCK ? task_tgid(current) : NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2502) pid_t pid_nr = pid_nr_ns(pid, fm->fc->pid_ns);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2503) int err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2504)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2505) if (fl->fl_lmops && fl->fl_lmops->lm_grant) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2506) /* NLM needs asynchronous locks, which we don't support yet */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2507) return -ENOLCK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2508) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2509)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2510) /* Unlock on close is handled by the flush method */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2511) if ((fl->fl_flags & FL_CLOSE_POSIX) == FL_CLOSE_POSIX)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2512) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2513)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2514) fuse_lk_fill(&args, file, fl, opcode, pid_nr, flock, &inarg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2515) err = fuse_simple_request(fm, &args);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2516)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2517) /* locking is restartable */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2518) if (err == -EINTR)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2519) err = -ERESTARTSYS;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2520)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2521) return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2522) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2523)
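/*
 * POSIX lock method.  If the server doesn't implement locking
 * (no_lock), fall back to local lock bookkeeping so that processes
 * on this mount still see consistent lock semantics.
 */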
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2524) static int fuse_file_lock(struct file *file, int cmd, struct file_lock *fl)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2525) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2526) struct inode *inode = file_inode(file);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2527) struct fuse_conn *fc = get_fuse_conn(inode);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2528) int err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2529)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2530) if (cmd == F_CANCELLK) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2531) err = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2532) } else if (cmd == F_GETLK) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2533) if (fc->no_lock) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2534) posix_test_lock(file, fl);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2535) err = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2536) } else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2537) err = fuse_getlk(file, fl);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2538) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2539) if (fc->no_lock)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2540) err = posix_lock_file(file, fl, NULL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2541) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2542) err = fuse_setlk(file, fl, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2543) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2544) return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2545) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2546)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2547) static int fuse_file_flock(struct file *file, int cmd, struct file_lock *fl)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2548) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2549) struct inode *inode = file_inode(file);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2550) struct fuse_conn *fc = get_fuse_conn(inode);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2551) int err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2552)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2553) if (fc->no_flock) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2554) err = locks_lock_file_wait(file, fl);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2555) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2556) struct fuse_file *ff = file->private_data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2557)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2558) /* emulate flock with POSIX locks */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2559) ff->flock = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2560) err = fuse_setlk(file, fl, 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2561) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2562)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2563) return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2564) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2565)
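/*
 * FUSE_BMAP is only meaningful for mounts backed by a block device
 * (fuseblk); returning 0 here means "no mapping", which is also used
 * when the server doesn't implement bmap.
 */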
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2566) static sector_t fuse_bmap(struct address_space *mapping, sector_t block)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2567) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2568) struct inode *inode = mapping->host;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2569) struct fuse_mount *fm = get_fuse_mount(inode);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2570) FUSE_ARGS(args);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2571) struct fuse_bmap_in inarg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2572) struct fuse_bmap_out outarg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2573) int err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2574)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2575) if (!inode->i_sb->s_bdev || fm->fc->no_bmap)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2576) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2577)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2578) memset(&inarg, 0, sizeof(inarg));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2579) inarg.block = block;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2580) inarg.blocksize = inode->i_sb->s_blocksize;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2581) args.opcode = FUSE_BMAP;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2582) args.nodeid = get_node_id(inode);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2583) args.in_numargs = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2584) args.in_args[0].size = sizeof(inarg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2585) args.in_args[0].value = &inarg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2586) args.out_numargs = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2587) args.out_args[0].size = sizeof(outarg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2588) args.out_args[0].value = &outarg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2589) err = fuse_simple_request(fm, &args);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2590) if (err == -ENOSYS)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2591) fm->fc->no_bmap = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2592)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2593) return err ? 0 : outarg.block;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2594) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2595)
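/*
 * Delegate SEEK_HOLE/SEEK_DATA to the server via FUSE_LSEEK.  If the
 * server doesn't implement it (-ENOSYS), remember that and fall back
 * to the generic implementation after refreshing the attributes.
 */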
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2596) static loff_t fuse_lseek(struct file *file, loff_t offset, int whence)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2597) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2598) struct inode *inode = file->f_mapping->host;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2599) struct fuse_mount *fm = get_fuse_mount(inode);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2600) struct fuse_file *ff = file->private_data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2601) FUSE_ARGS(args);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2602) struct fuse_lseek_in inarg = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2603) .fh = ff->fh,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2604) .offset = offset,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2605) .whence = whence
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2606) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2607) struct fuse_lseek_out outarg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2608) int err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2609)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2610) if (fm->fc->no_lseek)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2611) goto fallback;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2612)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2613) args.opcode = FUSE_LSEEK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2614) args.nodeid = ff->nodeid;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2615) args.in_numargs = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2616) args.in_args[0].size = sizeof(inarg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2617) args.in_args[0].value = &inarg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2618) args.out_numargs = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2619) args.out_args[0].size = sizeof(outarg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2620) args.out_args[0].value = &outarg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2621) err = fuse_simple_request(fm, &args);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2622) if (err) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2623) if (err == -ENOSYS) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2624) fm->fc->no_lseek = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2625) goto fallback;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2626) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2627) return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2628) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2629)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2630) return vfs_setpos(file, outarg.offset, inode->i_sb->s_maxbytes);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2631)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2632) fallback:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2633) err = fuse_update_attributes(inode, file);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2634) if (!err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2635) return generic_file_llseek(file, offset, whence);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2636) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2637) return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2638) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2639)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2640) static loff_t fuse_file_llseek(struct file *file, loff_t offset, int whence)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2641) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2642) loff_t retval;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2643) struct inode *inode = file_inode(file);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2644)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2645) switch (whence) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2646) case SEEK_SET:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2647) case SEEK_CUR:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2648) /* No i_mutex protection necessary for SEEK_CUR and SEEK_SET */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2649) retval = generic_file_llseek(file, offset, whence);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2650) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2651) case SEEK_END:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2652) inode_lock(inode);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2653) retval = fuse_update_attributes(inode, file);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2654) if (!retval)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2655) retval = generic_file_llseek(file, offset, whence);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2656) inode_unlock(inode);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2657) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2658) case SEEK_HOLE:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2659) case SEEK_DATA:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2660) inode_lock(inode);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2661) retval = fuse_lseek(file, offset, whence);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2662) inode_unlock(inode);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2663) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2664) default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2665) retval = -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2666) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2667)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2668) return retval;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2669) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2670)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2671) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2672) * CUSE servers compiled on 32bit broke on 64bit kernels because the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2673) * ABI was defined to be 'struct iovec' which is different on 32bit
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2674) * and 64bit. Fortunately we can determine which structure the server
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2675) * used from the size of the reply.
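 *
 * For example, on a 64bit kernel a reply describing four ranges is
 * 4 * 16 bytes when the server used struct iovec but 4 * 8 bytes when
 * it used struct compat_iovec, so the two cases can be told apart
 * from 'transferred' alone.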
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2676) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2677) static int fuse_copy_ioctl_iovec_old(struct iovec *dst, void *src,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2678) size_t transferred, unsigned count,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2679) bool is_compat)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2680) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2681) #ifdef CONFIG_COMPAT
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2682) if (count * sizeof(struct compat_iovec) == transferred) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2683) struct compat_iovec *ciov = src;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2684) unsigned i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2685)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2686) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2687) * With this interface a 32bit server cannot support
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2688) * non-compat (i.e. ones coming from 64bit apps) ioctl
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2689) * requests
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2690) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2691) if (!is_compat)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2692) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2693)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2694) for (i = 0; i < count; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2695) dst[i].iov_base = compat_ptr(ciov[i].iov_base);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2696) dst[i].iov_len = ciov[i].iov_len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2697) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2698) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2699) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2700) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2701)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2702) if (count * sizeof(struct iovec) != transferred)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2703) return -EIO;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2704)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2705) memcpy(dst, src, transferred);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2706) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2707) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2708)
/*
 * Make sure iov_length() won't overflow: each segment is checked
 * against the remaining budget of max_pages worth of bytes, so the
 * running total can never exceed it.
 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2710) static int fuse_verify_ioctl_iov(struct fuse_conn *fc, struct iovec *iov,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2711) size_t count)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2712) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2713) size_t n;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2714) u32 max = fc->max_pages << PAGE_SHIFT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2715)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2716) for (n = 0; n < count; n++, iov++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2717) if (iov->iov_len > (size_t) max)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2718) return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2719) max -= iov->iov_len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2720) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2721) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2722) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2723)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2724) static int fuse_copy_ioctl_iovec(struct fuse_conn *fc, struct iovec *dst,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2725) void *src, size_t transferred, unsigned count,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2726) bool is_compat)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2727) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2728) unsigned i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2729) struct fuse_ioctl_iovec *fiov = src;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2730)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2731) if (fc->minor < 16) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2732) return fuse_copy_ioctl_iovec_old(dst, src, transferred,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2733) count, is_compat);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2734) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2735)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2736) if (count * sizeof(struct fuse_ioctl_iovec) != transferred)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2737) return -EIO;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2738)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2739) for (i = 0; i < count; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2740) /* Did the server supply an inappropriate value? */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2741) if (fiov[i].base != (unsigned long) fiov[i].base ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2742) fiov[i].len != (unsigned long) fiov[i].len)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2743) return -EIO;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2744)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2745) dst[i].iov_base = (void __user *) (unsigned long) fiov[i].base;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2746) dst[i].iov_len = (size_t) fiov[i].len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2747)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2748) #ifdef CONFIG_COMPAT
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2749) if (is_compat &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2750) (ptr_to_compat(dst[i].iov_base) != fiov[i].base ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2751) (compat_size_t) dst[i].iov_len != fiov[i].len))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2752) return -EIO;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2753) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2754) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2755)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2756) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2757) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2758)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2760) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2761) * For ioctls, there is no generic way to determine how much memory
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2762) * needs to be read and/or written. Furthermore, ioctls are allowed
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2763) * to dereference the passed pointer, so the parameter requires deep
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2764) * copying but FUSE has no idea whatsoever about what to copy in or
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2765) * out.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2766) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2767) * This is solved by allowing FUSE server to retry ioctl with
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2768) * necessary in/out iovecs. Let's assume the ioctl implementation
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2769) * needs to read in the following structure.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2770) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2771) * struct a {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2772) * char *buf;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2773) * size_t buflen;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2774) * }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2775) *
 * On the first callout to the FUSE server, inarg->in_size and
 * inarg->out_size will be zero (no data is transferred yet); then,
 * the server completes the ioctl with FUSE_IOCTL_RETRY set in
 * out->flags, out->in_iovs set to 1 and the actual iov array to
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2780) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2781) * { { .iov_base = inarg.arg, .iov_len = sizeof(struct a) } }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2782) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2783) * which tells FUSE to copy in the requested area and retry the ioctl.
 * On the second round, the server has access to the structure and
 * from that it can tell what to look for next, so on the second
 * invocation it sets FUSE_IOCTL_RETRY, out->in_iovs to 2 and the
 * iov array to
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2787) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2788) * { { .iov_base = inarg.arg, .iov_len = sizeof(struct a) },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2789) * { .iov_base = a.buf, .iov_len = a.buflen } }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2790) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2791) * FUSE will copy both struct a and the pointed buffer from the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2792) * process doing the ioctl and retry ioctl with both struct a and the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2793) * buffer.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2794) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2795) * This time, FUSE server has everything it needs and completes ioctl
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2796) * without FUSE_IOCTL_RETRY which finishes the ioctl call.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2797) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2798) * Copying data out works the same way.
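 *
 * Since ABI version 7.16 the retry iovs are transferred as an array
 * of struct fuse_ioctl_iovec with fixed-width fields instead of raw
 * struct iovec; fuse_copy_ioctl_iovec() picks the decoding based on
 * the negotiated minor version.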
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2799) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2800) * Note that if FUSE_IOCTL_UNRESTRICTED is clear, the kernel
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2801) * automatically initializes in and out iovs by decoding @cmd with
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2802) * _IOC_* macros and the server is not allowed to request RETRY. This
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2803) * limits ioctl data transfers to well-formed ioctls and is the forced
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2804) * behavior for all FUSE servers.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2805) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2806) long fuse_do_ioctl(struct file *file, unsigned int cmd, unsigned long arg,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2807) unsigned int flags)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2808) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2809) struct fuse_file *ff = file->private_data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2810) struct fuse_mount *fm = ff->fm;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2811) struct fuse_ioctl_in inarg = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2812) .fh = ff->fh,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2813) .cmd = cmd,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2814) .arg = arg,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2815) .flags = flags
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2816) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2817) struct fuse_ioctl_out outarg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2818) struct iovec *iov_page = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2819) struct iovec *in_iov = NULL, *out_iov = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2820) unsigned int in_iovs = 0, out_iovs = 0, max_pages;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2821) size_t in_size, out_size, c;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2822) ssize_t transferred;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2823) int err, i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2824) struct iov_iter ii;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2825) struct fuse_args_pages ap = {};
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2826)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2827) #if BITS_PER_LONG == 32
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2828) inarg.flags |= FUSE_IOCTL_32BIT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2829) #else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2830) if (flags & FUSE_IOCTL_COMPAT) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2831) inarg.flags |= FUSE_IOCTL_32BIT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2832) #ifdef CONFIG_X86_X32
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2833) if (in_x32_syscall())
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2834) inarg.flags |= FUSE_IOCTL_COMPAT_X32;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2835) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2836) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2837) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2838)
	/* assume all the iovs returned by the client always fit in a page */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2840) BUILD_BUG_ON(sizeof(struct fuse_ioctl_iovec) * FUSE_IOCTL_MAX_IOV > PAGE_SIZE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2841)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2842) err = -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2843) ap.pages = fuse_pages_alloc(fm->fc->max_pages, GFP_KERNEL, &ap.descs);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2844) iov_page = (struct iovec *) __get_free_page(GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2845) if (!ap.pages || !iov_page)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2846) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2847)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2848) fuse_page_descs_length_init(ap.descs, 0, fm->fc->max_pages);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2849)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2850) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2851) * If restricted, initialize IO parameters as encoded in @cmd.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2852) * RETRY from server is not allowed.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2853) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2854) if (!(flags & FUSE_IOCTL_UNRESTRICTED)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2855) struct iovec *iov = iov_page;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2856)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2857) iov->iov_base = (void __user *)arg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2858)
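		/*
		 * FS_IOC_[GS]ETFLAGS are encoded with 'long' in their
		 * ioctl number but actually transfer an int, so they
		 * can't rely on _IOC_SIZE(cmd) like the default case.
		 */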
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2859) switch (cmd) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2860) case FS_IOC_GETFLAGS:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2861) case FS_IOC_SETFLAGS:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2862) iov->iov_len = sizeof(int);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2863) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2864) default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2865) iov->iov_len = _IOC_SIZE(cmd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2866) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2867) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2868)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2869) if (_IOC_DIR(cmd) & _IOC_WRITE) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2870) in_iov = iov;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2871) in_iovs = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2872) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2873)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2874) if (_IOC_DIR(cmd) & _IOC_READ) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2875) out_iov = iov;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2876) out_iovs = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2877) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2878) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2879)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2880) retry:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2881) inarg.in_size = in_size = iov_length(in_iov, in_iovs);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2882) inarg.out_size = out_size = iov_length(out_iov, out_iovs);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2883)
	/*
	 * The out buffer is used either for actual out data or, on
	 * retry, for the iov array; make sure there is always at
	 * least one page for it.
	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2888) out_size = max_t(size_t, out_size, PAGE_SIZE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2889) max_pages = DIV_ROUND_UP(max(in_size, out_size), PAGE_SIZE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2890)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2891) /* make sure there are enough buffer pages and init request with them */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2892) err = -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2893) if (max_pages > fm->fc->max_pages)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2894) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2895) while (ap.num_pages < max_pages) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2896) ap.pages[ap.num_pages] = alloc_page(GFP_KERNEL | __GFP_HIGHMEM);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2897) if (!ap.pages[ap.num_pages])
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2898) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2899) ap.num_pages++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2900) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2901)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2903) /* okay, let's send it to the client */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2904) ap.args.opcode = FUSE_IOCTL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2905) ap.args.nodeid = ff->nodeid;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2906) ap.args.in_numargs = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2907) ap.args.in_args[0].size = sizeof(inarg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2908) ap.args.in_args[0].value = &inarg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2909) if (in_size) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2910) ap.args.in_numargs++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2911) ap.args.in_args[1].size = in_size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2912) ap.args.in_pages = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2913)
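		/* copy the in iovecs into the request's buffer pages */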
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2914) err = -EFAULT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2915) iov_iter_init(&ii, WRITE, in_iov, in_iovs, in_size);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2916) for (i = 0; iov_iter_count(&ii) && !WARN_ON(i >= ap.num_pages); i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2917) c = copy_page_from_iter(ap.pages[i], 0, PAGE_SIZE, &ii);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2918) if (c != PAGE_SIZE && iov_iter_count(&ii))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2919) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2920) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2921) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2922)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2923) ap.args.out_numargs = 2;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2924) ap.args.out_args[0].size = sizeof(outarg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2925) ap.args.out_args[0].value = &outarg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2926) ap.args.out_args[1].size = out_size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2927) ap.args.out_pages = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2928) ap.args.out_argvar = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2929)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2930) transferred = fuse_simple_request(fm, &ap.args);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2931) err = transferred;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2932) if (transferred < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2933) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2934)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2935) /* did it ask for retry? */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2936) if (outarg.flags & FUSE_IOCTL_RETRY) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2937) void *vaddr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2938)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2939) /* no retry if in restricted mode */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2940) err = -EIO;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2941) if (!(flags & FUSE_IOCTL_UNRESTRICTED))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2942) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2943)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2944) in_iovs = outarg.in_iovs;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2945) out_iovs = outarg.out_iovs;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2946)
		/*
		 * Make sure the counts are within bounds; the checks
		 * are kept separate to protect against overflow.
		 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2951) err = -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2952) if (in_iovs > FUSE_IOCTL_MAX_IOV ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2953) out_iovs > FUSE_IOCTL_MAX_IOV ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2954) in_iovs + out_iovs > FUSE_IOCTL_MAX_IOV)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2955) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2956)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2957) vaddr = kmap_atomic(ap.pages[0]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2958) err = fuse_copy_ioctl_iovec(fm->fc, iov_page, vaddr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2959) transferred, in_iovs + out_iovs,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2960) (flags & FUSE_IOCTL_COMPAT) != 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2961) kunmap_atomic(vaddr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2962) if (err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2963) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2964)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2965) in_iov = iov_page;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2966) out_iov = in_iov + in_iovs;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2967)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2968) err = fuse_verify_ioctl_iov(fm->fc, in_iov, in_iovs);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2969) if (err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2970) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2971)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2972) err = fuse_verify_ioctl_iov(fm->fc, out_iov, out_iovs);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2973) if (err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2974) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2975)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2976) goto retry;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2977) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2978)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2979) err = -EIO;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2980) if (transferred > inarg.out_size)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2981) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2982)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2983) err = -EFAULT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2984) iov_iter_init(&ii, READ, out_iov, out_iovs, transferred);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2985) for (i = 0; iov_iter_count(&ii) && !WARN_ON(i >= ap.num_pages); i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2986) c = copy_page_to_iter(ap.pages[i], 0, PAGE_SIZE, &ii);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2987) if (c != PAGE_SIZE && iov_iter_count(&ii))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2988) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2989) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2990) err = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2991) out:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2992) free_page((unsigned long) iov_page);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2993) while (ap.num_pages)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2994) __free_page(ap.pages[--ap.num_pages]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2995) kfree(ap.pages);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2996)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2997) return err ? err : outarg.result;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2998) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2999) EXPORT_SYMBOL_GPL(fuse_do_ioctl);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3000)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3001) long fuse_ioctl_common(struct file *file, unsigned int cmd,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3002) unsigned long arg, unsigned int flags)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3003) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3004) struct inode *inode = file_inode(file);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3005) struct fuse_conn *fc = get_fuse_conn(inode);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3006)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3007) if (!fuse_allow_current_process(fc))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3008) return -EACCES;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3009)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3010) if (fuse_is_bad(inode))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3011) return -EIO;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3012)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3013) return fuse_do_ioctl(file, cmd, arg, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3014) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3015)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3016) static long fuse_file_ioctl(struct file *file, unsigned int cmd,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3017) unsigned long arg)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3018) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3019) return fuse_ioctl_common(file, cmd, arg, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3020) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3021)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3022) static long fuse_file_compat_ioctl(struct file *file, unsigned int cmd,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3023) unsigned long arg)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3024) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3025) return fuse_ioctl_common(file, cmd, arg, FUSE_IOCTL_COMPAT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3026) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3027)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3028) /*
 * All files that have been polled are linked into the RB tree
 * fuse_conn->polled_files, which is indexed by kh.  Walk the tree and
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3031) * find the matching one.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3032) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3033) static struct rb_node **fuse_find_polled_node(struct fuse_conn *fc, u64 kh,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3034) struct rb_node **parent_out)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3035) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3036) struct rb_node **link = &fc->polled_files.rb_node;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3037) struct rb_node *last = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3038)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3039) while (*link) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3040) struct fuse_file *ff;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3041)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3042) last = *link;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3043) ff = rb_entry(last, struct fuse_file, polled_node);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3044)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3045) if (kh < ff->kh)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3046) link = &last->rb_left;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3047) else if (kh > ff->kh)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3048) link = &last->rb_right;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3049) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3050) return link;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3051) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3052)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3053) if (parent_out)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3054) *parent_out = last;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3055) return link;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3056) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3057)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3058) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3059) * The file is about to be polled. Make sure it's on the polled_files
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3060) * RB tree. Note that files once added to the polled_files tree are
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3061) * not removed before the file is released. This is because a file
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3062) * polled once is likely to be polled again.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3063) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3064) static void fuse_register_polled_file(struct fuse_conn *fc,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3065) struct fuse_file *ff)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3066) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3067) spin_lock(&fc->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3068) if (RB_EMPTY_NODE(&ff->polled_node)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3069) struct rb_node **link, *parent;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3070)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3071) link = fuse_find_polled_node(fc, ff->kh, &parent);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3072) BUG_ON(*link);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3073) rb_link_node(&ff->polled_node, parent, link);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3074) rb_insert_color(&ff->polled_node, &fc->polled_files);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3075) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3076) spin_unlock(&fc->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3077) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3078)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3079) __poll_t fuse_file_poll(struct file *file, poll_table *wait)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3080) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3081) struct fuse_file *ff = file->private_data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3082) struct fuse_mount *fm = ff->fm;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3083) struct fuse_poll_in inarg = { .fh = ff->fh, .kh = ff->kh };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3084) struct fuse_poll_out outarg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3085) FUSE_ARGS(args);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3086) int err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3087)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3088) if (fm->fc->no_poll)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3089) return DEFAULT_POLLMASK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3090)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3091) poll_wait(file, &ff->poll_wait, wait);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3092) inarg.events = mangle_poll(poll_requested_events(wait));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3093)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3094) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3095) * Ask for notification iff there's someone waiting for it.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3096) * The client may ignore the flag and always notify.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3097) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3098) if (waitqueue_active(&ff->poll_wait)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3099) inarg.flags |= FUSE_POLL_SCHEDULE_NOTIFY;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3100) fuse_register_polled_file(fm->fc, ff);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3101) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3102)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3103) args.opcode = FUSE_POLL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3104) args.nodeid = ff->nodeid;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3105) args.in_numargs = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3106) args.in_args[0].size = sizeof(inarg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3107) args.in_args[0].value = &inarg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3108) args.out_numargs = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3109) args.out_args[0].size = sizeof(outarg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3110) args.out_args[0].value = &outarg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3111) err = fuse_simple_request(fm, &args);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3112)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3113) if (!err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3114) return demangle_poll(outarg.revents);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3115) if (err == -ENOSYS) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3116) fm->fc->no_poll = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3117) return DEFAULT_POLLMASK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3118) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3119) return EPOLLERR;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3120) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3121) EXPORT_SYMBOL_GPL(fuse_file_poll);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3122)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3123) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3124) * This is called from fuse_handle_notify() on FUSE_NOTIFY_POLL and
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3125) * wakes up the poll waiters.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3126) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3127) int fuse_notify_poll_wakeup(struct fuse_conn *fc,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3128) struct fuse_notify_poll_wakeup_out *outarg)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3129) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3130) u64 kh = outarg->kh;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3131) struct rb_node **link;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3132)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3133) spin_lock(&fc->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3134)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3135) link = fuse_find_polled_node(fc, kh, NULL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3136) if (*link) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3137) struct fuse_file *ff;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3138)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3139) ff = rb_entry(*link, struct fuse_file, polled_node);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3140) wake_up_interruptible_sync(&ff->poll_wait);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3141) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3142)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3143) spin_unlock(&fc->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3144) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3145) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3146)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3147) static void fuse_do_truncate(struct file *file)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3148) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3149) struct inode *inode = file->f_mapping->host;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3150) struct iattr attr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3151)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3152) attr.ia_valid = ATTR_SIZE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3153) attr.ia_size = i_size_read(inode);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3154)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3155) attr.ia_file = file;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3156) attr.ia_valid |= ATTR_FILE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3157)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3158) fuse_do_setattr(file_dentry(file), &attr, file);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3159) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3160)
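/* Round up to a whole number of maximally sized requests */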
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3161) static inline loff_t fuse_round_up(struct fuse_conn *fc, loff_t off)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3162) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3163) return round_up(off, fc->max_pages << PAGE_SHIFT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3164) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3165)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3166) static ssize_t
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3167) fuse_direct_IO(struct kiocb *iocb, struct iov_iter *iter)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3168) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3169) DECLARE_COMPLETION_ONSTACK(wait);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3170) ssize_t ret = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3171) struct file *file = iocb->ki_filp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3172) struct fuse_file *ff = file->private_data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3173) loff_t pos = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3174) struct inode *inode;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3175) loff_t i_size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3176) size_t count = iov_iter_count(iter), shortened = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3177) loff_t offset = iocb->ki_pos;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3178) struct fuse_io_priv *io;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3179)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3180) pos = offset;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3181) inode = file->f_mapping->host;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3182) i_size = i_size_read(inode);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3183)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3184) if ((iov_iter_rw(iter) == READ) && (offset >= i_size))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3185) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3186)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3187) io = kmalloc(sizeof(struct fuse_io_priv), GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3188) if (!io)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3189) return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3190) spin_lock_init(&io->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3191) kref_init(&io->refcnt);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3192) io->reqs = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3193) io->bytes = -1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3194) io->size = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3195) io->offset = offset;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3196) io->write = (iov_iter_rw(iter) == WRITE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3197) io->err = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3198) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3199) * By default, we want to optimize all I/Os with async request
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3200) * submission to the client filesystem if supported.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3201) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3202) io->async = ff->fm->fc->async_dio;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3203) io->iocb = iocb;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3204) io->blocking = is_sync_kiocb(iocb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3205)
	/*
	 * Optimization for short reads: if the read extends past EOF,
	 * shorten it to the first request-size boundary past i_size;
	 * the iterator is re-expanded after the requests have been
	 * issued.
	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3207) if (io->async && !io->write && offset + count > i_size) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3208) iov_iter_truncate(iter, fuse_round_up(ff->fm->fc, i_size - offset));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3209) shortened = count - iov_iter_count(iter);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3210) count -= shortened;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3211) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3212)
	/*
	 * We cannot asynchronously extend the size of a file.
	 * In such a case the aio request will behave exactly like
	 * sync io.
	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3217) if ((offset + count > i_size) && io->write)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3218) io->blocking = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3219)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3220) if (io->async && io->blocking) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3221) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3222) * Additional reference to keep io around after
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3223) * calling fuse_aio_complete()
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3224) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3225) kref_get(&io->refcnt);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3226) io->done = &wait;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3227) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3228)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3229) if (iov_iter_rw(iter) == WRITE) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3230) ret = fuse_direct_io(io, iter, &pos, FUSE_DIO_WRITE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3231) fuse_invalidate_attr(inode);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3232) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3233) ret = __fuse_direct_read(io, iter, &pos);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3234) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3235) iov_iter_reexpand(iter, iov_iter_count(iter) + shortened);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3236)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3237) if (io->async) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3238) bool blocking = io->blocking;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3239)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3240) fuse_aio_complete(io, ret < 0 ? ret : 0, -1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3241)
		/*
		 * Non-extending async request: ownership passes to the AIO
		 * core, which is notified via ->ki_complete() when
		 * fuse_aio_complete() retires the last sub-request.
		 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3243) if (!blocking)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3244) return -EIOCBQUEUED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3245)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3246) wait_for_completion(&wait);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3247) ret = fuse_get_res_by_io(io);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3248) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3249)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3250) kref_put(&io->refcnt, fuse_io_release);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3251)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3252) if (iov_iter_rw(iter) == WRITE) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3253) if (ret > 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3254) fuse_write_update_size(inode, pos);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3255) else if (ret < 0 && offset + count > i_size)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3256) fuse_do_truncate(file);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3257) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3258)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3259) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3260) }
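
/*
 * Illustrative sketch (not kernel code): the async path above is only taken
 * when the server negotiated async DIO.  Assuming the libfuse 3 API, a
 * userspace filesystem would opt in from its init handler roughly like so:
 *
 *	static void example_init(void *userdata, struct fuse_conn_info *conn)
 *	{
 *		if (conn->capable & FUSE_CAP_ASYNC_DIO)
 *			conn->want |= FUSE_CAP_ASYNC_DIO;
 *	}
 *
 * example_init() is a hypothetical name; the capability bit itself is real.
 */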
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3261)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3262) static int fuse_writeback_range(struct inode *inode, loff_t start, loff_t end)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3263) {
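	/*
	 * Writeback is deliberately extended from 'end' to LLONG_MAX:
	 * extending writes may be queued beyond the requested range and
	 * must also be flushed before the server-side operation runs.
	 */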
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3264) int err = filemap_write_and_wait_range(inode->i_mapping, start, LLONG_MAX);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3265)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3266) if (!err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3267) fuse_sync_writes(inode);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3268)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3269) return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3270) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3271)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3272) static long fuse_file_fallocate(struct file *file, int mode, loff_t offset,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3273) loff_t length)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3274) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3275) struct fuse_file *ff = file->private_data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3276) struct inode *inode = file_inode(file);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3277) struct fuse_inode *fi = get_fuse_inode(inode);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3278) struct fuse_mount *fm = ff->fm;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3279) FUSE_ARGS(args);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3280) struct fuse_fallocate_in inarg = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3281) .fh = ff->fh,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3282) .offset = offset,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3283) .length = length,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3284) .mode = mode
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3285) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3286) int err;
	bool lock_inode = !(mode & FALLOC_FL_KEEP_SIZE) ||
			  (mode & FALLOC_FL_PUNCH_HOLE);
	bool block_faults = FUSE_IS_DAX(inode) && lock_inode;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3291)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3292) if (mode & ~(FALLOC_FL_KEEP_SIZE | FALLOC_FL_PUNCH_HOLE))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3293) return -EOPNOTSUPP;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3294)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3295) if (fm->fc->no_fallocate)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3296) return -EOPNOTSUPP;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3297)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3298) if (lock_inode) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3299) inode_lock(inode);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3300) if (block_faults) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3301) down_write(&fi->i_mmap_sem);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3302) err = fuse_dax_break_layouts(inode, 0, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3303) if (err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3304) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3305) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3306)
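		/*
		 * Flush dirty pages in the punched range first, so that
		 * writeback of stale cached data cannot re-populate the
		 * hole after the server has punched it.
		 */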
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3307) if (mode & FALLOC_FL_PUNCH_HOLE) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3308) loff_t endbyte = offset + length - 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3309)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3310) err = fuse_writeback_range(inode, offset, endbyte);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3311) if (err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3312) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3313) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3314) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3315)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3316) if (!(mode & FALLOC_FL_KEEP_SIZE) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3317) offset + length > i_size_read(inode)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3318) err = inode_newsize_ok(inode, offset + length);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3319) if (err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3320) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3321) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3322)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3323) if (!(mode & FALLOC_FL_KEEP_SIZE))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3324) set_bit(FUSE_I_SIZE_UNSTABLE, &fi->state);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3325)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3326) args.opcode = FUSE_FALLOCATE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3327) args.nodeid = ff->nodeid;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3328) args.in_numargs = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3329) args.in_args[0].size = sizeof(inarg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3330) args.in_args[0].value = &inarg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3331) err = fuse_simple_request(fm, &args);
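	/*
	 * -ENOSYS from the server is sticky: remember it in no_fallocate
	 * so later calls fail fast with -EOPNOTSUPP (see the check above).
	 */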
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3332) if (err == -ENOSYS) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3333) fm->fc->no_fallocate = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3334) err = -EOPNOTSUPP;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3335) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3336) if (err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3337) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3338)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3339) /* we could have extended the file */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3340) if (!(mode & FALLOC_FL_KEEP_SIZE)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3341) bool changed = fuse_write_update_size(inode, offset + length);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3342)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3343) if (changed && fm->fc->writeback_cache)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3344) file_update_time(file);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3345) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3346)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3347) if (mode & FALLOC_FL_PUNCH_HOLE)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3348) truncate_pagecache_range(inode, offset, offset + length - 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3349)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3350) fuse_invalidate_attr(inode);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3351)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3352) out:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3353) if (!(mode & FALLOC_FL_KEEP_SIZE))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3354) clear_bit(FUSE_I_SIZE_UNSTABLE, &fi->state);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3355)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3356) if (block_faults)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3357) up_write(&fi->i_mmap_sem);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3358)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3359) if (lock_inode)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3360) inode_unlock(inode);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3361)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3362) fuse_flush_time_update(inode);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3363)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3364) return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3365) }
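
/*
 * Illustrative sketch (not kernel code): the punch-hole branch above is what
 * services a plain fallocate(2) call from userspace, e.g.:
 *
 *	int punch_hole(int fd, off_t offset, off_t length)
 *	{
 *		return fallocate(fd, FALLOC_FL_PUNCH_HOLE |
 *				     FALLOC_FL_KEEP_SIZE, offset, length);
 *	}
 *
 * punch_hole() is a hypothetical helper; note that PUNCH_HOLE is only valid
 * together with KEEP_SIZE, which matches the mode check at the top of
 * fuse_file_fallocate().
 */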
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3366)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3367) static ssize_t __fuse_copy_file_range(struct file *file_in, loff_t pos_in,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3368) struct file *file_out, loff_t pos_out,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3369) size_t len, unsigned int flags)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3370) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3371) struct fuse_file *ff_in = file_in->private_data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3372) struct fuse_file *ff_out = file_out->private_data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3373) struct inode *inode_in = file_inode(file_in);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3374) struct inode *inode_out = file_inode(file_out);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3375) struct fuse_inode *fi_out = get_fuse_inode(inode_out);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3376) struct fuse_mount *fm = ff_in->fm;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3377) struct fuse_conn *fc = fm->fc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3378) FUSE_ARGS(args);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3379) struct fuse_copy_file_range_in inarg = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3380) .fh_in = ff_in->fh,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3381) .off_in = pos_in,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3382) .nodeid_out = ff_out->nodeid,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3383) .fh_out = ff_out->fh,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3384) .off_out = pos_out,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3385) .len = len,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3386) .flags = flags
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3387) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3388) struct fuse_write_out outarg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3389) ssize_t err;
	/*
	 * Mark the size unstable when writeback caching is not used and
	 * file_out gets extended.
	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3392) bool is_unstable = (!fc->writeback_cache) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3393) ((pos_out + len) > inode_out->i_size);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3394)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3395) if (fc->no_copy_file_range)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3396) return -EOPNOTSUPP;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3397)
	if (inode_in->i_sb != inode_out->i_sb)
		return -EXDEV;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3400)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3401) inode_lock(inode_in);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3402) err = fuse_writeback_range(inode_in, pos_in, pos_in + len - 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3403) inode_unlock(inode_in);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3404) if (err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3405) return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3406)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3407) inode_lock(inode_out);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3408)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3409) err = file_modified(file_out);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3410) if (err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3411) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3412)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3413) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3414) * Write out dirty pages in the destination file before sending the COPY
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3415) * request to userspace. After the request is completed, truncate off
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3416) * pages (including partial ones) from the cache that have been copied,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3417) * since these contain stale data at that point.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3418) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3419) * This should be mostly correct, but if the COPY writes to partial
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3420) * pages (at the start or end) and the parts not covered by the COPY are
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3421) * written through a memory map after calling fuse_writeback_range(),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3422) * then these partial page modifications will be lost on truncation.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3423) *
	 * It is unlikely that anyone would rely on such mixed-style
	 * modifications; still, this gives fewer guarantees than copying
	 * with write(2) would.
	 *
	 * To fix this, an i_mmap_sem-style lock could be used to prevent
	 * new faults while the copy is ongoing.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3430) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3431) err = fuse_writeback_range(inode_out, pos_out, pos_out + len - 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3432) if (err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3433) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3434)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3435) if (is_unstable)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3436) set_bit(FUSE_I_SIZE_UNSTABLE, &fi_out->state);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3437)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3438) args.opcode = FUSE_COPY_FILE_RANGE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3439) args.nodeid = ff_in->nodeid;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3440) args.in_numargs = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3441) args.in_args[0].size = sizeof(inarg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3442) args.in_args[0].value = &inarg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3443) args.out_numargs = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3444) args.out_args[0].size = sizeof(outarg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3445) args.out_args[0].value = &outarg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3446) err = fuse_simple_request(fm, &args);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3447) if (err == -ENOSYS) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3448) fc->no_copy_file_range = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3449) err = -EOPNOTSUPP;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3450) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3451) if (err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3452) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3453)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3454) truncate_inode_pages_range(inode_out->i_mapping,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3455) ALIGN_DOWN(pos_out, PAGE_SIZE),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3456) ALIGN(pos_out + outarg.size, PAGE_SIZE) - 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3457)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3458) if (fc->writeback_cache) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3459) fuse_write_update_size(inode_out, pos_out + outarg.size);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3460) file_update_time(file_out);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3461) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3462)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3463) fuse_invalidate_attr(inode_out);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3464)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3465) err = outarg.size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3466) out:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3467) if (is_unstable)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3468) clear_bit(FUSE_I_SIZE_UNSTABLE, &fi_out->state);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3469)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3470) inode_unlock(inode_out);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3471) file_accessed(file_in);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3472)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3473) fuse_flush_time_update(inode_out);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3474)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3475) return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3476) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3477)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3478) static ssize_t fuse_copy_file_range(struct file *src_file, loff_t src_off,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3479) struct file *dst_file, loff_t dst_off,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3480) size_t len, unsigned int flags)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3481) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3482) ssize_t ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3483)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3484) ret = __fuse_copy_file_range(src_file, src_off, dst_file, dst_off,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3485) len, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3486)
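	/*
	 * Fall back to the generic splice-based copy when the server lacks
	 * COPY_FILE_RANGE support or the two files live on different
	 * superblocks.
	 */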
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3487) if (ret == -EOPNOTSUPP || ret == -EXDEV)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3488) ret = generic_copy_file_range(src_file, src_off, dst_file,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3489) dst_off, len, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3490) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3491) }
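
/*
 * Illustrative sketch (not kernel code): thanks to the fallback above, a
 * userspace copy works whether or not the server implements
 * FUSE_COPY_FILE_RANGE:
 *
 *	ssize_t copied = copy_file_range(fd_in, NULL, fd_out, NULL, len, 0);
 *
 * With NULL offset pointers both file offsets are used and updated, exactly
 * as with read(2)/write(2).
 */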
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3492)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3493) static const struct file_operations fuse_file_operations = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3494) .llseek = fuse_file_llseek,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3495) .read_iter = fuse_file_read_iter,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3496) .write_iter = fuse_file_write_iter,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3497) .mmap = fuse_file_mmap,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3498) .open = fuse_open,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3499) .flush = fuse_flush,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3500) .release = fuse_release,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3501) .fsync = fuse_fsync,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3502) .lock = fuse_file_lock,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3503) .get_unmapped_area = thp_get_unmapped_area,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3504) .flock = fuse_file_flock,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3505) .splice_read = generic_file_splice_read,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3506) .splice_write = iter_file_splice_write,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3507) .unlocked_ioctl = fuse_file_ioctl,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3508) .compat_ioctl = fuse_file_compat_ioctl,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3509) .poll = fuse_file_poll,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3510) .fallocate = fuse_file_fallocate,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3511) .copy_file_range = fuse_copy_file_range,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3512) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3513)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3514) static const struct address_space_operations fuse_file_aops = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3515) .readpage = fuse_readpage,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3516) .readahead = fuse_readahead,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3517) .writepage = fuse_writepage,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3518) .writepages = fuse_writepages,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3519) .launder_page = fuse_launder_page,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3520) .set_page_dirty = __set_page_dirty_nobuffers,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3521) .bmap = fuse_bmap,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3522) .direct_IO = fuse_direct_IO,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3523) .write_begin = fuse_write_begin,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3524) .write_end = fuse_write_end,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3525) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3526)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3527) void fuse_init_file_inode(struct inode *inode)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3528) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3529) struct fuse_inode *fi = get_fuse_inode(inode);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3530)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3531) inode->i_fop = &fuse_file_operations;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3532) inode->i_data.a_ops = &fuse_file_aops;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3533)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3534) INIT_LIST_HEAD(&fi->write_files);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3535) INIT_LIST_HEAD(&fi->queued_writes);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3536) fi->writectr = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3537) init_waitqueue_head(&fi->page_waitq);
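	/* rbtree of in-flight writeback requests, indexed by page offset */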
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3538) fi->writepages = RB_ROOT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3539)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3540) if (IS_ENABLED(CONFIG_FUSE_DAX))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3541) fuse_dax_inode_init(inode);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3542) }