// SPDX-License-Identifier: GPL-2.0-only
/*
 * linux/fs/nfs/file.c
 *
 * Copyright (C) 1992  Rick Sladkey
 *
 * Changes Copyright (C) 1994 by Florian La Roche
 *  - Do not copy data too often around in the kernel.
 *  - In nfs_file_read the return value of kmalloc wasn't checked.
 *  - Put in a better version of read look-ahead buffering. Original idea
 *    and implementation by Wai S Kok elekokws@ee.nus.sg.
 *
 * Expire cache on write to a file by Wai S Kok (Oct 1994).
 *
 * Total rewrite of read side for new NFS buffer cache.. Linus.
 *
 * nfs regular file handling functions
 */

#include <linux/module.h>
#include <linux/time.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/fcntl.h>
#include <linux/stat.h>
#include <linux/nfs_fs.h>
#include <linux/nfs_mount.h>
#include <linux/mm.h>
#include <linux/pagemap.h>
#include <linux/gfp.h>
#include <linux/swap.h>

#include <linux/uaccess.h>

#include "delegation.h"
#include "internal.h"
#include "iostat.h"
#include "fscache.h"
#include "pnfs.h"

#include "nfstrace.h"

#define NFSDBG_FACILITY		NFSDBG_FILE

static const struct vm_operations_struct nfs_file_vm_ops;

/* Hack for future NFS swap support */
#ifndef IS_SWAPFILE
# define IS_SWAPFILE(inode)	(0)
#endif

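/*
 * Sanity-check open flags: the O_APPEND|O_DIRECT combination is not
 * supported on NFS.
 */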
int nfs_check_flags(int flags)
{
	if ((flags & (O_APPEND | O_DIRECT)) == (O_APPEND | O_DIRECT))
		return -EINVAL;

	return 0;
}
EXPORT_SYMBOL_GPL(nfs_check_flags);

/*
 * Open file
 */
static int
nfs_file_open(struct inode *inode, struct file *filp)
{
	int res;

	dprintk("NFS: open file(%pD2)\n", filp);

	nfs_inc_stats(inode, NFSIOS_VFSOPEN);
	res = nfs_check_flags(filp->f_flags);
	if (res)
		return res;

	res = nfs_open(inode, filp);
	return res;
}

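/*
 * Release an open file: drop the open context that was attached at open
 * time and account the release in the NFS iostats.
 */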
int
nfs_file_release(struct inode *inode, struct file *filp)
{
	dprintk("NFS: release(%pD2)\n", filp);

	nfs_inc_stats(inode, NFSIOS_VFSRELEASE);
	nfs_file_clear_open_context(filp);
	return 0;
}
EXPORT_SYMBOL_GPL(nfs_file_release);

/**
 * nfs_revalidate_file_size - Revalidate the file size
 * @inode: pointer to inode struct
 * @filp: pointer to struct file
 *
 * Revalidates the file length. This is basically a wrapper around
 * nfs_revalidate_inode() that takes into account the fact that we may
 * have cached writes (in which case we don't care about the server's
 * idea of what the file length is), or O_DIRECT (in which case we
 * shouldn't trust the cache).
 */
static int nfs_revalidate_file_size(struct inode *inode, struct file *filp)
{
	struct nfs_server *server = NFS_SERVER(inode);

	if (filp->f_flags & O_DIRECT)
		goto force_reval;
	if (nfs_check_cache_invalid(inode, NFS_INO_REVAL_PAGECACHE))
		goto force_reval;
	return 0;
force_reval:
	return __nfs_revalidate_inode(server, inode);
}

loff_t nfs_file_llseek(struct file *filp, loff_t offset, int whence)
{
	dprintk("NFS: llseek file(%pD2, %lld, %d)\n",
			filp, offset, whence);

	/*
	 * whence == SEEK_END || SEEK_DATA || SEEK_HOLE => we must revalidate
	 * the cached file length
	 */
	if (whence != SEEK_SET && whence != SEEK_CUR) {
		struct inode *inode = filp->f_mapping->host;

		int retval = nfs_revalidate_file_size(inode, filp);
		if (retval < 0)
			return (loff_t)retval;
	}

	return generic_file_llseek(filp, offset, whence);
}
EXPORT_SYMBOL_GPL(nfs_file_llseek);

/*
 * Flush all dirty pages, and check for write errors.
 */
static int
nfs_file_flush(struct file *file, fl_owner_t id)
{
	struct inode *inode = file_inode(file);
	errseq_t since;

	dprintk("NFS: flush(%pD2)\n", file);

	nfs_inc_stats(inode, NFSIOS_VFSFLUSH);
	if ((file->f_mode & FMODE_WRITE) == 0)
		return 0;

	/* Flush writes to the server and return any errors */
	since = filemap_sample_wb_err(file->f_mapping);
	nfs_wb_all(inode);
	return filemap_check_wb_err(file->f_mapping, since);
}

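/*
 * Buffered read. O_DIRECT readers are diverted to nfs_file_direct_read();
 * everyone else revalidates the page cache mapping under the read I/O lock
 * and then uses the generic page cache read path.
 */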
ssize_t
nfs_file_read(struct kiocb *iocb, struct iov_iter *to)
{
	struct inode *inode = file_inode(iocb->ki_filp);
	ssize_t result;

	if (iocb->ki_flags & IOCB_DIRECT)
		return nfs_file_direct_read(iocb, to);

	dprintk("NFS: read(%pD2, %zu@%lu)\n",
		iocb->ki_filp,
		iov_iter_count(to), (unsigned long) iocb->ki_pos);

	nfs_start_io_read(inode);
	result = nfs_revalidate_mapping(inode, iocb->ki_filp->f_mapping);
	if (!result) {
		result = generic_file_read_iter(iocb, to);
		if (result > 0)
			nfs_add_stats(inode, NFSIOS_NORMALREADBYTES, result);
	}
	nfs_end_io_read(inode);
	return result;
}
EXPORT_SYMBOL_GPL(nfs_file_read);

int
nfs_file_mmap(struct file * file, struct vm_area_struct * vma)
{
	struct inode *inode = file_inode(file);
	int status;

	dprintk("NFS: mmap(%pD2)\n", file);

	/* Note: generic_file_mmap() returns ENOSYS on nommu systems
	 *       so we call that before revalidating the mapping
	 */
	status = generic_file_mmap(file, vma);
	if (!status) {
		vma->vm_ops = &nfs_file_vm_ops;
		status = nfs_revalidate_mapping(inode, file->f_mapping);
	}
	return status;
}
EXPORT_SYMBOL_GPL(nfs_file_mmap);

/*
 * Flush any dirty pages for this process, and check for write errors.
 * The return status from this call provides a reliable indication of
 * whether any write errors occurred for this process.
 */
static int
nfs_file_fsync_commit(struct file *file, int datasync)
{
	struct inode *inode = file_inode(file);
	int ret;

	dprintk("NFS: fsync file(%pD2) datasync %d\n", file, datasync);

	nfs_inc_stats(inode, NFSIOS_VFSFSYNC);
	ret = nfs_commit_inode(inode, FLUSH_SYNC);
	if (ret < 0)
		return ret;
	return file_check_and_advance_wb_err(file);
}

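/*
 * fsync: write back and commit the requested range, syncing pNFS layouts
 * as well, and retry over the whole file if NFS_CONTEXT_RESEND_WRITES was
 * set (e.g. because a server reboot was detected during the commit).
 */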
int
nfs_file_fsync(struct file *file, loff_t start, loff_t end, int datasync)
{
	struct nfs_open_context *ctx = nfs_file_open_context(file);
	struct inode *inode = file_inode(file);
	int ret;

	trace_nfs_fsync_enter(inode);

	for (;;) {
		ret = file_write_and_wait_range(file, start, end);
		if (ret != 0)
			break;
		ret = nfs_file_fsync_commit(file, datasync);
		if (ret != 0)
			break;
		ret = pnfs_sync_inode(inode, !!datasync);
		if (ret != 0)
			break;
		if (!test_and_clear_bit(NFS_CONTEXT_RESEND_WRITES, &ctx->flags))
			break;
		/*
		 * If nfs_file_fsync_commit detected a server reboot, then
		 * resend all dirty pages that might have been covered by
		 * the NFS_CONTEXT_RESEND_WRITES flag
		 */
		start = 0;
		end = LLONG_MAX;
	}

	trace_nfs_fsync_exit(inode, ret);
	return ret;
}
EXPORT_SYMBOL_GPL(nfs_file_fsync);

/*
 * Decide whether a read/modify/write cycle may be more efficient
 * than a modify/write/read cycle when writing to a page in the
 * page cache.
 *
 * Some pNFS layout drivers can only read/write at a certain block
 * granularity like all block devices and therefore we must perform
 * read/modify/write whenever a page hasn't been read yet and the data
 * to be written there is not aligned to a block boundary and/or
 * smaller than the block size.
 *
 * The modify/write/read cycle may occur if a page is read before
 * being completely filled by the writer. In this situation, the
 * page must be completely written to stable storage on the server
 * before it can be refilled by reading in the page from the server.
 * This can lead to expensive, small, FILE_SYNC mode writes being
 * done.
 *
 * It may be more efficient to read the page first if the file is
 * open for reading in addition to writing, the page is not marked
 * as Uptodate, it is not dirty or waiting to be committed,
 * indicating that it was previously allocated and then modified,
 * that there were valid bytes of data in that range of the file,
 * and that the new data won't completely replace the old data in
 * that range of the file.
 */
static bool nfs_full_page_write(struct page *page, loff_t pos, unsigned int len)
{
	unsigned int pglen = nfs_page_length(page);
	unsigned int offset = pos & (PAGE_SIZE - 1);
	unsigned int end = offset + len;

	return !pglen || (end >= pglen && !offset);
}

static bool nfs_want_read_modify_write(struct file *file, struct page *page,
			loff_t pos, unsigned int len)
{
	/*
	 * Up-to-date pages, and those with an ongoing or full-page write,
	 * don't need read/modify/write
	 */
	if (PageUptodate(page) || PagePrivate(page) ||
	    nfs_full_page_write(page, pos, len))
		return false;

	if (pnfs_ld_read_whole_page(file->f_mapping->host))
		return true;
	/* Open for reading too? */
	if (file->f_mode & FMODE_READ)
		return true;
	return false;
}

/*
 * This does the "real" work of the write. We must allocate and lock the
 * page to be sent back to the generic routine, which then copies the
 * data from user space.
 *
 * If the writer ends up delaying the write, the writer needs to
 * increment the page use counts until it is done with the page.
 */
static int nfs_write_begin(struct file *file, struct address_space *mapping,
			loff_t pos, unsigned len, unsigned flags,
			struct page **pagep, void **fsdata)
{
	int ret;
	pgoff_t index = pos >> PAGE_SHIFT;
	struct page *page;
	int once_thru = 0;

	dfprintk(PAGECACHE, "NFS: write_begin(%pD2(%lu), %u@%lld)\n",
		file, mapping->host->i_ino, len, (long long) pos);

start:
	page = grab_cache_page_write_begin(mapping, index, flags);
	if (!page)
		return -ENOMEM;
	*pagep = page;

	ret = nfs_flush_incompatible(file, page);
	if (ret) {
		unlock_page(page);
		put_page(page);
	} else if (!once_thru &&
		   nfs_want_read_modify_write(file, page, pos, len)) {
		once_thru = 1;
		ret = nfs_readpage(file, page);
		put_page(page);
		if (!ret)
			goto start;
	}
	return ret;
}

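/*
 * Complete a buffered write to one page: zero any uninitialised parts of
 * the page, mark it up to date where appropriate, hand the copied bytes to
 * nfs_updatepage(), and flush the inode if the RPC credential key used for
 * writeback is about to expire.
 */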
static int nfs_write_end(struct file *file, struct address_space *mapping,
			loff_t pos, unsigned len, unsigned copied,
			struct page *page, void *fsdata)
{
	unsigned offset = pos & (PAGE_SIZE - 1);
	struct nfs_open_context *ctx = nfs_file_open_context(file);
	int status;

	dfprintk(PAGECACHE, "NFS: write_end(%pD2(%lu), %u@%lld)\n",
		file, mapping->host->i_ino, len, (long long) pos);

	/*
	 * Zero any uninitialised parts of the page, and then mark the page
	 * as up to date if it turns out that we're extending the file.
	 */
	if (!PageUptodate(page)) {
		unsigned pglen = nfs_page_length(page);
		unsigned end = offset + copied;

		if (pglen == 0) {
			zero_user_segments(page, 0, offset,
					end, PAGE_SIZE);
			SetPageUptodate(page);
		} else if (end >= pglen) {
			zero_user_segment(page, end, PAGE_SIZE);
			if (offset == 0)
				SetPageUptodate(page);
		} else
			zero_user_segment(page, pglen, PAGE_SIZE);
	}

	status = nfs_updatepage(file, page, offset, copied);

	unlock_page(page);
	put_page(page);

	if (status < 0)
		return status;
	NFS_I(mapping->host)->write_io += copied;

	if (nfs_ctx_key_to_expire(ctx, mapping->host)) {
		status = nfs_wb_all(mapping->host);
		if (status < 0)
			return status;
	}

	return copied;
}

/*
 * Partially or wholly invalidate a page
 * - Release the private state associated with a page if undergoing complete
 *   page invalidation
 * - Called if either PG_private or PG_fscache is set on the page
 * - Caller holds page lock
 */
static void nfs_invalidate_page(struct page *page, unsigned int offset,
				unsigned int length)
{
	dfprintk(PAGECACHE, "NFS: invalidate_page(%p, %u, %u)\n",
		 page, offset, length);

	if (offset != 0 || length < PAGE_SIZE)
		return;
	/* Cancel any unstarted writes on this page */
	nfs_wb_page_cancel(page_file_mapping(page)->host, page);

	nfs_fscache_invalidate_page(page, page->mapping->host);
}

/*
 * Attempt to release the private state associated with a page
 * - Called if either PG_private or PG_fscache is set on the page
 * - Caller holds page lock
 * - Return true (may release page) or false (may not)
 */
static int nfs_release_page(struct page *page, gfp_t gfp)
{
	dfprintk(PAGECACHE, "NFS: release_page(%p)\n", page);

	/* If PagePrivate() is set, then the page is not freeable */
	if (PagePrivate(page))
		return 0;
	return nfs_fscache_release_page(page, gfp);
}

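/*
 * Tell the VM whether this page should be counted as dirty and/or under
 * writeback when it decides how to throttle reclaim; see the comments
 * inside for the two special cases.
 */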
static void nfs_check_dirty_writeback(struct page *page,
				bool *dirty, bool *writeback)
{
	struct nfs_inode *nfsi;
	struct address_space *mapping = page_file_mapping(page);

	if (!mapping || PageSwapCache(page))
		return;

	/*
	 * Check if an unstable page is currently being committed and
	 * if so, have the VM treat it as if the page is under writeback
	 * so it will not block due to pages that will shortly be freeable.
	 */
	nfsi = NFS_I(mapping->host);
	if (atomic_read(&nfsi->commit_info.rpcs_out)) {
		*writeback = true;
		return;
	}

	/*
	 * If PagePrivate() is set, then the page is not freeable and as the
	 * inode is not being committed, it's not going to be cleaned in the
	 * near future so treat it as dirty
	 */
	if (PagePrivate(page))
		*dirty = true;
}

/*
 * Attempt to clear the private state associated with a page when an error
 * occurs that requires the cached contents of an inode to be written back or
 * destroyed
 * - Called if either PG_private or fscache is set on the page
 * - Caller holds page lock
 * - Return 0 if successful, -error otherwise
 */
static int nfs_launder_page(struct page *page)
{
	struct inode *inode = page_file_mapping(page)->host;
	struct nfs_inode *nfsi = NFS_I(inode);

	dfprintk(PAGECACHE, "NFS: launder_page(%ld, %llu)\n",
		inode->i_ino, (long long)page_offset(page));

	nfs_fscache_wait_on_page_write(nfsi, page);
	return nfs_wb_page(inode, page);
}

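/*
 * Activate swap-over-NFS on this file: refuse swapfiles with holes
 * (i_blocks must cover i_size), report the usable span, and switch the
 * RPC client into swap mode.
 */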
static int nfs_swap_activate(struct swap_info_struct *sis, struct file *file,
						sector_t *span)
{
	unsigned long blocks;
	long long isize;
	struct rpc_clnt *clnt = NFS_CLIENT(file->f_mapping->host);
	struct inode *inode = file->f_mapping->host;

	spin_lock(&inode->i_lock);
	blocks = inode->i_blocks;
	isize = inode->i_size;
	spin_unlock(&inode->i_lock);
	if (blocks*512 < isize) {
		pr_warn("swap activate: swapfile has holes\n");
		return -EINVAL;
	}

	*span = sis->pages;

	return rpc_clnt_swap_activate(clnt);
}

static void nfs_swap_deactivate(struct file *file)
{
	struct rpc_clnt *clnt = NFS_CLIENT(file->f_mapping->host);

	rpc_clnt_swap_deactivate(clnt);
}

const struct address_space_operations nfs_file_aops = {
	.readpage = nfs_readpage,
	.readpages = nfs_readpages,
	.set_page_dirty = __set_page_dirty_nobuffers,
	.writepage = nfs_writepage,
	.writepages = nfs_writepages,
	.write_begin = nfs_write_begin,
	.write_end = nfs_write_end,
	.invalidatepage = nfs_invalidate_page,
	.releasepage = nfs_release_page,
	.direct_IO = nfs_direct_IO,
#ifdef CONFIG_MIGRATION
	.migratepage = nfs_migrate_page,
#endif
	.launder_page = nfs_launder_page,
	.is_dirty_writeback = nfs_check_dirty_writeback,
	.error_remove_page = generic_error_remove_page,
	.swap_activate = nfs_swap_activate,
	.swap_deactivate = nfs_swap_deactivate,
};

/*
 * Notification that a PTE pointing to an NFS page is about to be made
 * writable, implying that someone is about to modify the page through a
 * shared-writable mapping
 */
static vm_fault_t nfs_vm_page_mkwrite(struct vm_fault *vmf)
{
	struct page *page = vmf->page;
	struct file *filp = vmf->vma->vm_file;
	struct inode *inode = file_inode(filp);
	unsigned pagelen;
	vm_fault_t ret = VM_FAULT_NOPAGE;
	struct address_space *mapping;

	dfprintk(PAGECACHE, "NFS: vm_page_mkwrite(%pD2(%lu), offset %lld)\n",
		filp, filp->f_mapping->host->i_ino,
		(long long)page_offset(page));

	sb_start_pagefault(inode->i_sb);

	/* make sure the cache has finished storing the page */
	nfs_fscache_wait_on_page_write(NFS_I(inode), page);

	wait_on_bit_action(&NFS_I(inode)->flags, NFS_INO_INVALIDATING,
			nfs_wait_bit_killable, TASK_KILLABLE);

	lock_page(page);
	mapping = page_file_mapping(page);
	if (mapping != inode->i_mapping)
		goto out_unlock;

	wait_on_page_writeback(page);

	pagelen = nfs_page_length(page);
	if (pagelen == 0)
		goto out_unlock;

	ret = VM_FAULT_LOCKED;
	if (nfs_flush_incompatible(filp, page) == 0 &&
	    nfs_updatepage(filp, page, 0, pagelen) == 0)
		goto out;

	ret = VM_FAULT_SIGBUS;
out_unlock:
	unlock_page(page);
out:
	sb_end_pagefault(inode->i_sb);
	return ret;
}

static const struct vm_operations_struct nfs_file_vm_ops = {
	.fault = filemap_fault,
	.map_pages = filemap_map_pages,
	.page_mkwrite = nfs_vm_page_mkwrite,
};

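/*
 * Decide whether the just-completed buffered write needs an immediate
 * nfs_wb_all(): yes when writeback saw an error that is fatal on the
 * server, or when the RPC credential key used for writing is about to
 * expire.
 */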
static int nfs_need_check_write(struct file *filp, struct inode *inode,
				int error)
{
	struct nfs_open_context *ctx;

	ctx = nfs_file_open_context(filp);
	if (nfs_error_is_fatal_on_server(error) ||
	    nfs_ctx_key_to_expire(ctx, inode))
		return 1;
	return 0;
}

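/*
 * Buffered write. O_DIRECT writers are diverted to nfs_file_direct_write();
 * everyone else goes through the generic page cache write path under the
 * write I/O lock. The file size is revalidated first for O_APPEND, and on
 * success the writeback error state is checked (and the file flushed) when
 * nfs_need_check_write() says so.
 */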
ssize_t nfs_file_write(struct kiocb *iocb, struct iov_iter *from)
{
	struct file *file = iocb->ki_filp;
	struct inode *inode = file_inode(file);
	unsigned long written = 0;
	ssize_t result;
	errseq_t since;
	int error;

	result = nfs_key_timeout_notify(file, inode);
	if (result)
		return result;

	if (iocb->ki_flags & IOCB_DIRECT)
		return nfs_file_direct_write(iocb, from);

	dprintk("NFS: write(%pD2, %zu@%Ld)\n",
		file, iov_iter_count(from), (long long) iocb->ki_pos);

	if (IS_SWAPFILE(inode))
		goto out_swapfile;
	/*
	 * O_APPEND implies that we must revalidate the file length.
	 */
	if (iocb->ki_flags & IOCB_APPEND) {
		result = nfs_revalidate_file_size(inode, file);
		if (result)
			goto out;
	}
	if (iocb->ki_pos > i_size_read(inode))
		nfs_revalidate_mapping(inode, file->f_mapping);

	since = filemap_sample_wb_err(file->f_mapping);
	nfs_start_io_write(inode);
	result = generic_write_checks(iocb, from);
	if (result > 0) {
		current->backing_dev_info = inode_to_bdi(inode);
		result = generic_perform_write(file, from, iocb->ki_pos);
		current->backing_dev_info = NULL;
	}
	nfs_end_io_write(inode);
	if (result <= 0)
		goto out;

	written = result;
	iocb->ki_pos += written;
	result = generic_write_sync(iocb, written);
	if (result < 0)
		goto out;

	/* Return error values */
	error = filemap_check_wb_err(file->f_mapping, since);
	if (nfs_need_check_write(file, inode, error)) {
		int err = nfs_wb_all(inode);
		if (err < 0)
			result = err;
	}
	nfs_add_stats(inode, NFSIOS_NORMALWRITTENBYTES, written);
out:
	return result;

out_swapfile:
	printk(KERN_INFO "NFS: attempt to write to active swap file!\n");
	return -ETXTBSY;
}
EXPORT_SYMBOL_GPL(nfs_file_write);

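/*
 * Test for a conflicting POSIX lock (F_GETLK). The local lock table is
 * consulted first; the server is only asked when no local conflict was
 * found, we hold no read delegation, and the mount does not use local
 * locking.
 */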
static int
do_getlk(struct file *filp, int cmd, struct file_lock *fl, int is_local)
{
	struct inode *inode = filp->f_mapping->host;
	int status = 0;
	unsigned int saved_type = fl->fl_type;

	/* Try local locking first */
	posix_test_lock(filp, fl);
	if (fl->fl_type != F_UNLCK) {
		/* found a conflict */
		goto out;
	}
	fl->fl_type = saved_type;

	if (NFS_PROTO(inode)->have_delegation(inode, FMODE_READ))
		goto out_noconflict;

	if (is_local)
		goto out_noconflict;

	status = NFS_PROTO(inode)->lock(filp, cmd, fl);
out:
	return status;
out_noconflict:
	fl->fl_type = F_UNLCK;
	goto out;
}

static int
do_unlk(struct file *filp, int cmd, struct file_lock *fl, int is_local)
{
	struct inode *inode = filp->f_mapping->host;
	struct nfs_lock_context *l_ctx;
	int status;

	/*
	 * Flush all pending writes before doing anything
	 * with locks..
	 */
	nfs_wb_all(inode);

	l_ctx = nfs_get_lock_context(nfs_file_open_context(filp));
	if (!IS_ERR(l_ctx)) {
		status = nfs_iocounter_wait(l_ctx);
		nfs_put_lock_context(l_ctx);
		/* NOTE: special case
		 * If we're signalled while cleaning up locks on process exit, we
		 * still need to complete the unlock.
		 */
		if (status < 0 && !(fl->fl_flags & FL_CLOSE))
			return status;
	}

	/*
	 * Use local locking if mounted with "-onolock" or with appropriate
	 * "-olocal_lock="
	 */
	if (!is_local)
		status = NFS_PROTO(inode)->lock(filp, cmd, fl);
	else
		status = locks_lock_file_wait(filp, fl);
	return status;
}

static int
do_setlk(struct file *filp, int cmd, struct file_lock *fl, int is_local)
{
	struct inode *inode = filp->f_mapping->host;
	int status;

	/*
	 * Flush all pending writes before doing anything
	 * with locks..
	 */
	status = nfs_sync_mapping(filp->f_mapping);
	if (status != 0)
		goto out;

	/*
	 * Use local locking if mounted with "-onolock" or with appropriate
	 * "-olocal_lock="
	 */
	if (!is_local)
		status = NFS_PROTO(inode)->lock(filp, cmd, fl);
	else
		status = locks_lock_file_wait(filp, fl);
	if (status < 0)
		goto out;

	/*
	 * Invalidate cache to prevent missing any changes.  If
	 * the file is mapped, clear the page cache as well so
	 * those mappings will be loaded.
	 *
	 * This makes locking act as a cache coherency point.
	 */
	nfs_sync_mapping(filp->f_mapping);
	if (!NFS_PROTO(inode)->have_delegation(inode, FMODE_READ)) {
		nfs_zap_caches(inode);
		if (mapping_mapped(filp->f_mapping))
			nfs_revalidate_mapping(inode, filp->f_mapping);
	}
out:
	return status;
}

/*
 * Lock a (portion of) a file
 */
int nfs_lock(struct file *filp, int cmd, struct file_lock *fl)
{
	struct inode *inode = filp->f_mapping->host;
	int ret = -ENOLCK;
	int is_local = 0;

	dprintk("NFS: lock(%pD2, t=%x, fl=%x, r=%lld:%lld)\n",
			filp, fl->fl_type, fl->fl_flags,
			(long long)fl->fl_start, (long long)fl->fl_end);

	nfs_inc_stats(inode, NFSIOS_VFSLOCK);

	/* No mandatory locks over NFS */
	if (__mandatory_lock(inode) && fl->fl_type != F_UNLCK)
		goto out_err;

	if (NFS_SERVER(inode)->flags & NFS_MOUNT_LOCAL_FCNTL)
		is_local = 1;

	if (NFS_PROTO(inode)->lock_check_bounds != NULL) {
		ret = NFS_PROTO(inode)->lock_check_bounds(fl);
		if (ret < 0)
			goto out_err;
	}

	if (IS_GETLK(cmd))
		ret = do_getlk(filp, cmd, fl, is_local);
	else if (fl->fl_type == F_UNLCK)
		ret = do_unlk(filp, cmd, fl, is_local);
	else
		ret = do_setlk(filp, cmd, fl, is_local);
out_err:
	return ret;
}
EXPORT_SYMBOL_GPL(nfs_lock);

/*
 * Lock a (portion of) a file
 */
int nfs_flock(struct file *filp, int cmd, struct file_lock *fl)
{
	struct inode *inode = filp->f_mapping->host;
	int is_local = 0;

	dprintk("NFS: flock(%pD2, t=%x, fl=%x)\n",
			filp, fl->fl_type, fl->fl_flags);

	if (!(fl->fl_flags & FL_FLOCK))
		return -ENOLCK;

	/*
	 * The NFSv4 protocol doesn't support LOCK_MAND, which is not part of
	 * any standard. In principle we might be able to support LOCK_MAND
	 * on NFSv2/3 since NLMv3/4 support DOS share modes, but for now the
	 * NFS code is not set up for it.
	 */
	if (fl->fl_type & LOCK_MAND)
		return -EINVAL;

	if (NFS_SERVER(inode)->flags & NFS_MOUNT_LOCAL_FLOCK)
		is_local = 1;

	/* We're simulating flock() locks using posix locks on the server */
	if (fl->fl_type == F_UNLCK)
		return do_unlk(filp, cmd, fl, is_local);
	return do_setlk(filp, cmd, fl, is_local);
}
EXPORT_SYMBOL_GPL(nfs_flock);

const struct file_operations nfs_file_operations = {
	.llseek = nfs_file_llseek,
	.read_iter = nfs_file_read,
	.write_iter = nfs_file_write,
	.mmap = nfs_file_mmap,
	.open = nfs_file_open,
	.flush = nfs_file_flush,
	.release = nfs_file_release,
	.fsync = nfs_file_fsync,
	.lock = nfs_lock,
	.flock = nfs_flock,
	.splice_read = generic_file_splice_read,
	.splice_write = iter_file_splice_write,
	.check_flags = nfs_check_flags,
	.setlease = simple_nosetlease,
};
EXPORT_SYMBOL_GPL(nfs_file_operations);