^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1) // SPDX-License-Identifier: GPL-2.0
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3) * linux/fs/ext4/file.c
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5) * Copyright (C) 1992, 1993, 1994, 1995
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6) * Remy Card (card@masi.ibp.fr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7) * Laboratoire MASI - Institut Blaise Pascal
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8) * Universite Pierre et Marie Curie (Paris VI)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10) * from
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12) * linux/fs/minix/file.c
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14) * Copyright (C) 1991, 1992 Linus Torvalds
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 16) * ext4 fs regular file handling primitives
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 17) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 18) * 64-bit file support on 64-bit platforms by Jakub Jelinek
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 19) * (jj@sunsite.ms.mff.cuni.cz)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 20) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 21)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 22) #include <linux/time.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 23) #include <linux/fs.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 24) #include <linux/iomap.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 25) #include <linux/mount.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 26) #include <linux/path.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 27) #include <linux/dax.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 28) #include <linux/quotaops.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 29) #include <linux/pagevec.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 30) #include <linux/uio.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 31) #include <linux/mman.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 32) #include <linux/backing-dev.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 33) #include "ext4.h"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 34) #include "ext4_jbd2.h"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 35) #include "xattr.h"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 36) #include "acl.h"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 37) #include "truncate.h"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 38)
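/*
 * Decide whether this request can be served by the iomap direct I/O path.
 * Encrypted files that fscrypt cannot handle via DIO, verity files, inodes
 * journaling their data, and inodes carrying inline data all have to fall
 * back to buffered I/O.
 */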
static bool ext4_dio_supported(struct kiocb *iocb, struct iov_iter *iter)
{
        struct inode *inode = file_inode(iocb->ki_filp);

        if (!fscrypt_dio_supported(iocb, iter))
                return false;
        if (fsverity_active(inode))
                return false;
        if (ext4_should_journal_data(inode))
                return false;
        if (ext4_has_inline_data(inode))
                return false;
        return true;
}

static ssize_t ext4_dio_read_iter(struct kiocb *iocb, struct iov_iter *to)
{
        ssize_t ret;
        struct inode *inode = file_inode(iocb->ki_filp);

        if (iocb->ki_flags & IOCB_NOWAIT) {
                if (!inode_trylock_shared(inode))
                        return -EAGAIN;
        } else {
                inode_lock_shared(inode);
        }

        if (!ext4_dio_supported(iocb, to)) {
                inode_unlock_shared(inode);
                /*
                 * Fall back to buffered I/O if the operation being performed
                 * on the inode is not supported by direct I/O. The IOCB_DIRECT
                 * flag needs to be cleared here in order to ensure that the
                 * direct I/O path within generic_file_read_iter() is not
                 * taken.
                 */
                iocb->ki_flags &= ~IOCB_DIRECT;
                return generic_file_read_iter(iocb, to);
        }

        ret = iomap_dio_rw(iocb, to, &ext4_iomap_ops, NULL,
                           is_sync_kiocb(iocb));
        inode_unlock_shared(inode);

        file_accessed(iocb->ki_filp);
        return ret;
}

#ifdef CONFIG_FS_DAX
static ssize_t ext4_dax_read_iter(struct kiocb *iocb, struct iov_iter *to)
{
        struct inode *inode = file_inode(iocb->ki_filp);
        ssize_t ret;

        if (iocb->ki_flags & IOCB_NOWAIT) {
                if (!inode_trylock_shared(inode))
                        return -EAGAIN;
        } else {
                inode_lock_shared(inode);
        }
        /*
         * Recheck under the inode lock - at this point we are sure it cannot
         * change anymore.
         */
        if (!IS_DAX(inode)) {
                inode_unlock_shared(inode);
                /* Fall back to buffered IO in case we cannot support DAX */
                return generic_file_read_iter(iocb, to);
        }
        ret = dax_iomap_rw(iocb, to, &ext4_iomap_ops);
        inode_unlock_shared(inode);

        file_accessed(iocb->ki_filp);
        return ret;
}
#endif

static ssize_t ext4_file_read_iter(struct kiocb *iocb, struct iov_iter *to)
{
        struct inode *inode = file_inode(iocb->ki_filp);

        if (unlikely(ext4_forced_shutdown(EXT4_SB(inode->i_sb))))
                return -EIO;

        if (!iov_iter_count(to))
                return 0; /* skip atime */

#ifdef CONFIG_FS_DAX
        if (IS_DAX(inode))
                return ext4_dax_read_iter(iocb, to);
#endif
        if (iocb->ki_flags & IOCB_DIRECT)
                return ext4_dio_read_iter(iocb, to);

        return generic_file_read_iter(iocb, to);
}

/*
 * Called when an inode is released. Note that this is different
 * from ext4_file_open: open gets called at every open, but release
 * gets called only when /all/ the files are closed.
 */
static int ext4_release_file(struct inode *inode, struct file *filp)
{
        if (ext4_test_inode_state(inode, EXT4_STATE_DA_ALLOC_CLOSE)) {
                ext4_alloc_da_blocks(inode);
                ext4_clear_inode_state(inode, EXT4_STATE_DA_ALLOC_CLOSE);
        }
        /* if we are the last writer on the inode, drop the block reservation */
        if ((filp->f_mode & FMODE_WRITE) &&
            (atomic_read(&inode->i_writecount) == 1) &&
            !EXT4_I(inode)->i_reserved_data_blocks) {
                down_write(&EXT4_I(inode)->i_data_sem);
                ext4_discard_preallocations(inode, 0);
                up_write(&EXT4_I(inode)->i_data_sem);
        }
        if (is_dx(inode) && filp->private_data)
                ext4_htree_free_dir_info(filp->private_data);

        return 0;
}

/*
 * This tests whether the IO in question is block-aligned or not.
 * Ext4 utilizes unwritten extents when hole-filling during direct IO, and they
 * are converted to written only after the IO is complete. Until they are
 * mapped, these blocks appear as holes, so dio_zero_block() will assume that
 * it needs to zero out portions of the start and/or end block. If 2 AIO
 * threads are at work on the same unwritten block, they must be synchronized
 * or one thread will zero the other's data, causing corruption.
 */
static bool
ext4_unaligned_io(struct inode *inode, struct iov_iter *from, loff_t pos)
{
        struct super_block *sb = inode->i_sb;
        unsigned long blockmask = sb->s_blocksize - 1;

        if ((pos | iov_iter_alignment(from)) & blockmask)
                return true;

        return false;
}

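/* Does this I/O extend i_size or the on-disk size (i_disksize)? */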
static bool
ext4_extending_io(struct inode *inode, loff_t offset, size_t len)
{
        if (offset + len > i_size_read(inode) ||
            offset + len > EXT4_I(inode)->i_disksize)
                return true;
        return false;
}

/* Is IO overwriting allocated and initialized blocks? */
static bool ext4_overwrite_io(struct inode *inode, loff_t pos, loff_t len)
{
        struct ext4_map_blocks map;
        unsigned int blkbits = inode->i_blkbits;
        int err, blklen;

        if (pos + len > i_size_read(inode))
                return false;

        map.m_lblk = pos >> blkbits;
        map.m_len = EXT4_MAX_BLOCKS(len, pos, blkbits);
        blklen = map.m_len;

        err = ext4_map_blocks(NULL, inode, &map, 0);
        /*
         * 'err == blklen' means that all of the blocks have been preallocated,
         * regardless of whether they have been initialized or not. To exclude
         * unwritten extents, we need to check m_flags.
         */
        return err == blklen && (map.m_flags & EXT4_MAP_MAPPED);
}

static ssize_t ext4_generic_write_checks(struct kiocb *iocb,
                                         struct iov_iter *from)
{
        struct inode *inode = file_inode(iocb->ki_filp);
        ssize_t ret;

        if (unlikely(IS_IMMUTABLE(inode)))
                return -EPERM;

        ret = generic_write_checks(iocb, from);
        if (ret <= 0)
                return ret;

        /*
         * If we have encountered a bitmap-format file, the size limit
         * is smaller than s_maxbytes, which is for extent-mapped files.
         */
        if (!(ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS))) {
                struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);

                if (iocb->ki_pos >= sbi->s_bitmap_maxbytes)
                        return -EFBIG;
                iov_iter_truncate(from, sbi->s_bitmap_maxbytes - iocb->ki_pos);
        }

        return iov_iter_count(from);
}

static ssize_t ext4_write_checks(struct kiocb *iocb, struct iov_iter *from)
{
        ssize_t ret, count;

        count = ext4_generic_write_checks(iocb, from);
        if (count <= 0)
                return count;

        ret = file_modified(iocb->ki_filp);
        if (ret)
                return ret;
        return count;
}

static ssize_t ext4_buffered_write_iter(struct kiocb *iocb,
                                        struct iov_iter *from)
{
        ssize_t ret;
        struct inode *inode = file_inode(iocb->ki_filp);

        if (iocb->ki_flags & IOCB_NOWAIT)
                return -EOPNOTSUPP;

        ext4_fc_start_update(inode);
        inode_lock(inode);
        ret = ext4_write_checks(iocb, from);
        if (ret <= 0)
                goto out;

        current->backing_dev_info = inode_to_bdi(inode);
        ret = generic_perform_write(iocb->ki_filp, from, iocb->ki_pos);
        current->backing_dev_info = NULL;

out:
        inode_unlock(inode);
        ext4_fc_stop_update(inode);
        if (likely(ret > 0)) {
                iocb->ki_pos += ret;
                ret = generic_write_sync(iocb, ret);
        }

        return ret;
}

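/*
 * Finish off a write that may have gone past the on-disk inode size: update
 * i_disksize to match what was actually written, drop the inode from the
 * orphan list once that is safe, and truncate away any blocks that were
 * instantiated beyond the portion of the write that completed.
 */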
static ssize_t ext4_handle_inode_extension(struct inode *inode, loff_t offset,
                                           ssize_t written, size_t count)
{
        handle_t *handle;
        bool truncate = false;
        u8 blkbits = inode->i_blkbits;
        ext4_lblk_t written_blk, end_blk;
        int ret;

        /*
         * Note that EXT4_I(inode)->i_disksize can get extended up to
         * inode->i_size while the I/O was running due to writeback of delalloc
         * blocks. But, the code in ext4_iomap_alloc() is careful to use
         * zeroed/unwritten extents if this is possible; thus we won't leave
         * uninitialized blocks in a file even if we didn't succeed in writing
         * as much as we intended.
         */
        WARN_ON_ONCE(i_size_read(inode) < EXT4_I(inode)->i_disksize);
        if (offset + count <= EXT4_I(inode)->i_disksize) {
                /*
                 * We need to ensure that the inode is removed from the orphan
                 * list if it has been added prematurely, due to writeback of
                 * delalloc blocks.
                 */
                if (!list_empty(&EXT4_I(inode)->i_orphan) && inode->i_nlink) {
                        handle = ext4_journal_start(inode, EXT4_HT_INODE, 2);

                        if (IS_ERR(handle)) {
                                ext4_orphan_del(NULL, inode);
                                return PTR_ERR(handle);
                        }

                        ext4_orphan_del(handle, inode);
                        ext4_journal_stop(handle);
                }

                return written;
        }

        if (written < 0)
                goto truncate;

        handle = ext4_journal_start(inode, EXT4_HT_INODE, 2);
        if (IS_ERR(handle)) {
                written = PTR_ERR(handle);
                goto truncate;
        }

        if (ext4_update_inode_size(inode, offset + written)) {
                ret = ext4_mark_inode_dirty(handle, inode);
                if (unlikely(ret)) {
                        written = ret;
                        ext4_journal_stop(handle);
                        goto truncate;
                }
        }

        /*
         * We may need to truncate allocated but not written blocks beyond EOF.
         */
        written_blk = ALIGN(offset + written, 1 << blkbits);
        end_blk = ALIGN(offset + count, 1 << blkbits);
        if (written_blk < end_blk && ext4_can_truncate(inode))
                truncate = true;

        /*
         * Remove the inode from the orphan list if it has been extended and
         * everything went OK.
         */
        if (!truncate && inode->i_nlink)
                ext4_orphan_del(handle, inode);
        ext4_journal_stop(handle);

        if (truncate) {
truncate:
                ext4_truncate_failed_write(inode);
                /*
                 * If the truncate operation failed early, then the inode may
                 * still be on the orphan list. In that case, we need to try
                 * to remove the inode from the in-memory linked list.
                 */
                if (inode->i_nlink)
                        ext4_orphan_del(NULL, inode);
        }

        return written;
}

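/*
 * iomap dio ->end_io handler: convert unwritten extents covered by a
 * successful write and, for extending writes, update the in-memory i_size
 * before iomap_dio_rw() invalidates the page cache.
 */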
static int ext4_dio_write_end_io(struct kiocb *iocb, ssize_t size,
                                 int error, unsigned int flags)
{
        loff_t pos = iocb->ki_pos;
        struct inode *inode = file_inode(iocb->ki_filp);

        if (error)
                return error;

        if (size && flags & IOMAP_DIO_UNWRITTEN) {
                error = ext4_convert_unwritten_extents(NULL, inode, pos, size);
                if (error < 0)
                        return error;
        }
        /*
         * If we are extending the file, we have to update i_size here before
         * page cache gets invalidated in iomap_dio_rw(). Otherwise racing
         * buffered reads could zero out too much from page cache pages. Update
         * of on-disk size will happen later in ext4_dio_write_iter() where
         * we have enough information to also perform orphan list handling etc.
         * Note that we perform all extending writes synchronously under
         * i_rwsem held exclusively so i_size update is safe here in that case.
         * If the write was not extending, we cannot see pos > i_size here
         * because operations reducing i_size like truncate wait for all
         * outstanding DIO before updating i_size.
         */
        pos += size;
        if (pos > i_size_read(inode))
                i_size_write(inode, pos);

        return 0;
}

static const struct iomap_dio_ops ext4_dio_write_ops = {
        .end_io = ext4_dio_write_end_io,
};

/*
 * The intention here is to start with a shared lock acquired, then see if any
 * condition requires an exclusive inode lock. If yes, then we restart the
 * whole operation by releasing the shared lock and acquiring an exclusive one.
 *
 * - For unaligned IO we never take a shared lock, as it may cause data
 *   corruption when two unaligned IOs try to modify the same block, e.g.
 *   while zeroing.
 *
 * - For extending writes we don't take the shared lock, since extending
 *   requires updating the inode's i_disksize and/or orphan handling under an
 *   exclusive lock.
 *
 * - Shared locking is typically retained only for overwrites; in all other
 *   cases we switch to the exclusive i_rwsem lock.
 */
static ssize_t ext4_dio_write_checks(struct kiocb *iocb, struct iov_iter *from,
                                     bool *ilock_shared, bool *extend)
{
        struct file *file = iocb->ki_filp;
        struct inode *inode = file_inode(file);
        loff_t offset;
        size_t count;
        ssize_t ret;

restart:
        ret = ext4_generic_write_checks(iocb, from);
        if (ret <= 0)
                goto out;

        offset = iocb->ki_pos;
        count = ret;
        if (ext4_extending_io(inode, offset, count))
                *extend = true;
        /*
         * Determine whether the IO operation will overwrite allocated
         * and initialized blocks.
         * We need exclusive i_rwsem for changing security info
         * in file_modified().
         */
        if (*ilock_shared && (!IS_NOSEC(inode) || *extend ||
             !ext4_overwrite_io(inode, offset, count))) {
                if (iocb->ki_flags & IOCB_NOWAIT) {
                        ret = -EAGAIN;
                        goto out;
                }
                inode_unlock_shared(inode);
                *ilock_shared = false;
                inode_lock(inode);
                goto restart;
        }

        ret = file_modified(file);
        if (ret < 0)
                goto out;

        return count;
out:
        if (*ilock_shared)
                inode_unlock_shared(inode);
        else
                inode_unlock(inode);
        return ret;
}

static ssize_t ext4_dio_write_iter(struct kiocb *iocb, struct iov_iter *from)
{
        ssize_t ret;
        handle_t *handle;
        struct inode *inode = file_inode(iocb->ki_filp);
        loff_t offset = iocb->ki_pos;
        size_t count = iov_iter_count(from);
        const struct iomap_ops *iomap_ops = &ext4_iomap_ops;
        bool extend = false, unaligned_io = false;
        bool ilock_shared = true;

        /*
         * We initially start with a shared inode lock unless it is
         * unaligned IO which needs an exclusive lock anyway.
         */
        if (ext4_unaligned_io(inode, from, offset)) {
                unaligned_io = true;
                ilock_shared = false;
        }
        /*
         * Quick check here without any i_rwsem lock to see if it is extending
         * IO. A more reliable check is done in ext4_dio_write_checks() with
         * proper locking in place.
         */
        if (offset + count > i_size_read(inode))
                ilock_shared = false;

        if (iocb->ki_flags & IOCB_NOWAIT) {
                if (ilock_shared) {
                        if (!inode_trylock_shared(inode))
                                return -EAGAIN;
                } else {
                        if (!inode_trylock(inode))
                                return -EAGAIN;
                }
        } else {
                if (ilock_shared)
                        inode_lock_shared(inode);
                else
                        inode_lock(inode);
        }

        /* Fall back to buffered I/O if the inode does not support direct I/O. */
        if (!ext4_dio_supported(iocb, from)) {
                if (ilock_shared)
                        inode_unlock_shared(inode);
                else
                        inode_unlock(inode);
                return ext4_buffered_write_iter(iocb, from);
        }

        ret = ext4_dio_write_checks(iocb, from, &ilock_shared, &extend);
        if (ret <= 0)
                return ret;

        /* if we're going to block and IOCB_NOWAIT is set, return -EAGAIN */
        if ((iocb->ki_flags & IOCB_NOWAIT) && (unaligned_io || extend)) {
                ret = -EAGAIN;
                goto out;
        }

        offset = iocb->ki_pos;
        count = ret;
        /*
         * Unaligned direct IO must be serialized among each other as zeroing
         * of partial blocks of two competing unaligned IOs can result in data
         * corruption.
         *
         * So we make sure we don't allow any unaligned IO in flight.
         * For IOs where we need not wait (like unaligned non-AIO DIO),
         * the inode_dio_wait() below may anyway become a no-op, since we
         * start with an exclusive lock.
         */
        if (unaligned_io)
                inode_dio_wait(inode);

        if (extend) {
                handle = ext4_journal_start(inode, EXT4_HT_INODE, 2);
                if (IS_ERR(handle)) {
                        ret = PTR_ERR(handle);
                        goto out;
                }

                ext4_fc_start_update(inode);
                ret = ext4_orphan_add(handle, inode);
                ext4_fc_stop_update(inode);
                if (ret) {
                        ext4_journal_stop(handle);
                        goto out;
                }

                ext4_journal_stop(handle);
        }

        if (ilock_shared)
                iomap_ops = &ext4_iomap_overwrite_ops;
        ret = iomap_dio_rw(iocb, from, iomap_ops, &ext4_dio_write_ops,
                           is_sync_kiocb(iocb) || unaligned_io || extend);
        if (ret == -ENOTBLK)
                ret = 0;

        if (extend)
                ret = ext4_handle_inode_extension(inode, offset, ret, count);

out:
        if (ilock_shared)
                inode_unlock_shared(inode);
        else
                inode_unlock(inode);

        if (ret >= 0 && iov_iter_count(from)) {
                ssize_t err;
                loff_t endbyte;

                offset = iocb->ki_pos;
                err = ext4_buffered_write_iter(iocb, from);
                if (err < 0)
                        return err;

                /*
                 * We need to ensure that the pages within the page cache for
                 * the range covered by this I/O are written to disk and
                 * invalidated. This is an attempt to preserve the expected
                 * direct I/O semantics in the case we fall back to buffered
                 * I/O to complete the I/O request.
                 */
                ret += err;
                endbyte = offset + err - 1;
                err = filemap_write_and_wait_range(iocb->ki_filp->f_mapping,
                                                   offset, endbyte);
                if (!err)
                        invalidate_mapping_pages(iocb->ki_filp->f_mapping,
                                                 offset >> PAGE_SHIFT,
                                                 endbyte >> PAGE_SHIFT);
        }

        return ret;
}

#ifdef CONFIG_FS_DAX
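/*
 * DAX writes always run under the exclusive inode lock. A write reaching
 * past the on-disk size puts the inode on the orphan list first, so blocks
 * allocated beyond i_disksize are not leaked if we crash mid-write.
 */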
static ssize_t
ext4_dax_write_iter(struct kiocb *iocb, struct iov_iter *from)
{
        ssize_t ret;
        size_t count;
        loff_t offset;
        handle_t *handle;
        bool extend = false;
        struct inode *inode = file_inode(iocb->ki_filp);

        if (iocb->ki_flags & IOCB_NOWAIT) {
                if (!inode_trylock(inode))
                        return -EAGAIN;
        } else {
                inode_lock(inode);
        }

        ret = ext4_write_checks(iocb, from);
        if (ret <= 0)
                goto out;

        offset = iocb->ki_pos;
        count = iov_iter_count(from);

        if (offset + count > EXT4_I(inode)->i_disksize) {
                handle = ext4_journal_start(inode, EXT4_HT_INODE, 2);
                if (IS_ERR(handle)) {
                        ret = PTR_ERR(handle);
                        goto out;
                }

                ret = ext4_orphan_add(handle, inode);
                if (ret) {
                        ext4_journal_stop(handle);
                        goto out;
                }

                extend = true;
                ext4_journal_stop(handle);
        }

        ret = dax_iomap_rw(iocb, from, &ext4_iomap_ops);

        if (extend)
                ret = ext4_handle_inode_extension(inode, offset, ret, count);
out:
        inode_unlock(inode);
        if (ret > 0)
                ret = generic_write_sync(iocb, ret);
        return ret;
}
#endif

static ssize_t
ext4_file_write_iter(struct kiocb *iocb, struct iov_iter *from)
{
        struct inode *inode = file_inode(iocb->ki_filp);

        if (unlikely(ext4_forced_shutdown(EXT4_SB(inode->i_sb))))
                return -EIO;

#ifdef CONFIG_FS_DAX
        if (IS_DAX(inode))
                return ext4_dax_write_iter(iocb, from);
#endif
        if (iocb->ki_flags & IOCB_DIRECT)
                return ext4_dio_write_iter(iocb, from);
        else
                return ext4_buffered_write_iter(iocb, from);
}

#ifdef CONFIG_FS_DAX
static vm_fault_t ext4_dax_huge_fault(struct vm_fault *vmf,
                enum page_entry_size pe_size)
{
        int error = 0;
        vm_fault_t result;
        int retries = 0;
        handle_t *handle = NULL;
        struct inode *inode = file_inode(vmf->vma->vm_file);
        struct super_block *sb = inode->i_sb;

        /*
         * We have to distinguish real writes from writes which will result in a
         * COW page; COW writes should *not* poke the journal (the file will not
         * be changed). Doing so would cause unintended failures when mounted
         * read-only.
         *
         * We check for VM_SHARED rather than vmf->cow_page since the latter is
         * unset for pe_size != PE_SIZE_PTE (i.e. only in do_cow_fault); for
         * other sizes, dax_iomap_fault will handle splitting / fallback so that
         * we eventually come back with a COW page.
         */
        bool write = (vmf->flags & FAULT_FLAG_WRITE) &&
                (vmf->vma->vm_flags & VM_SHARED);
        pfn_t pfn;

        if (write) {
                sb_start_pagefault(sb);
                file_update_time(vmf->vma->vm_file);
                down_read(&EXT4_I(inode)->i_mmap_sem);
retry:
                handle = ext4_journal_start_sb(sb, EXT4_HT_WRITE_PAGE,
                                               EXT4_DATA_TRANS_BLOCKS(sb));
                if (IS_ERR(handle)) {
                        up_read(&EXT4_I(inode)->i_mmap_sem);
                        sb_end_pagefault(sb);
                        return VM_FAULT_SIGBUS;
                }
        } else {
                down_read(&EXT4_I(inode)->i_mmap_sem);
        }
        result = dax_iomap_fault(vmf, pe_size, &pfn, &error, &ext4_iomap_ops);
        if (write) {
                ext4_journal_stop(handle);

                if ((result & VM_FAULT_ERROR) && error == -ENOSPC &&
                    ext4_should_retry_alloc(sb, &retries))
                        goto retry;
                /* Handling synchronous page fault? */
                if (result & VM_FAULT_NEEDDSYNC)
                        result = dax_finish_sync_fault(vmf, pe_size, pfn);
                up_read(&EXT4_I(inode)->i_mmap_sem);
                sb_end_pagefault(sb);
        } else {
                up_read(&EXT4_I(inode)->i_mmap_sem);
        }

        return result;
}

static vm_fault_t ext4_dax_fault(struct vm_fault *vmf)
{
        return ext4_dax_huge_fault(vmf, PE_SIZE_PTE);
}

static const struct vm_operations_struct ext4_dax_vm_ops = {
        .fault          = ext4_dax_fault,
        .huge_fault     = ext4_dax_huge_fault,
        .page_mkwrite   = ext4_dax_fault,
        .pfn_mkwrite    = ext4_dax_fault,
};
#else
#define ext4_dax_vm_ops ext4_file_vm_ops
#endif

static const struct vm_operations_struct ext4_file_vm_ops = {
        .fault          = ext4_filemap_fault,
        .map_pages      = filemap_map_pages,
        .page_mkwrite   = ext4_page_mkwrite,
#ifdef CONFIG_SPECULATIVE_PAGE_FAULT
        .allow_speculation = filemap_allow_speculation,
#endif
};

static int ext4_file_mmap(struct file *file, struct vm_area_struct *vma)
{
        struct inode *inode = file->f_mapping->host;
        struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
        struct dax_device *dax_dev = sbi->s_daxdev;

        if (unlikely(ext4_forced_shutdown(sbi)))
                return -EIO;

        /*
         * We don't support synchronous mappings for non-DAX files, nor for
         * DAX files if the underlying dax_device is not synchronous.
         */
        if (!daxdev_mapping_supported(vma, dax_dev))
                return -EOPNOTSUPP;

        file_accessed(file);
        if (IS_DAX(file_inode(file))) {
                vma->vm_ops = &ext4_dax_vm_ops;
                vma->vm_flags |= VM_HUGEPAGE;
        } else {
                vma->vm_ops = &ext4_file_vm_ops;
        }
        return 0;
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 795)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 796) static int ext4_sample_last_mounted(struct super_block *sb,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 797) struct vfsmount *mnt)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 798) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 799) struct ext4_sb_info *sbi = EXT4_SB(sb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 800) struct path path;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 801) char buf[64], *cp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 802) handle_t *handle;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 803) int err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 804)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 805) if (likely(ext4_test_mount_flag(sb, EXT4_MF_MNTDIR_SAMPLED)))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 806) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 807)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 808) if (sb_rdonly(sb) || !sb_start_intwrite_trylock(sb))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 809) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 810)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 811) ext4_set_mount_flag(sb, EXT4_MF_MNTDIR_SAMPLED);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 812) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 813) * Sample where the filesystem has been mounted and
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 814) * store it in the superblock for sysadmin convenience
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 815) * when trying to sort through large numbers of block
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 816) * devices or filesystem images.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 817) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 818) memset(buf, 0, sizeof(buf));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 819) path.mnt = mnt;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 820) path.dentry = mnt->mnt_root;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 821) cp = d_path(&path, buf, sizeof(buf));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 822) err = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 823) if (IS_ERR(cp))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 824) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 825)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 826) handle = ext4_journal_start_sb(sb, EXT4_HT_MISC, 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 827) err = PTR_ERR(handle);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 828) if (IS_ERR(handle))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 829) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 830) BUFFER_TRACE(sbi->s_sbh, "get_write_access");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 831) err = ext4_journal_get_write_access(handle, sbi->s_sbh);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 832) if (err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 833) goto out_journal;
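	/*
	 * s_last_mounted is a fixed-size on-disk field that need not be
	 * NUL-terminated; strncpy() zero-pads short paths but will not
	 * terminate one that fills the whole field.
	 */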
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 834) strncpy(sbi->s_es->s_last_mounted, cp,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 835) sizeof(sbi->s_es->s_last_mounted));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 836) ext4_handle_dirty_super(handle, sb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 837) out_journal:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 838) ext4_journal_stop(handle);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 839) out:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 840) sb_end_intwrite(sb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 841) return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 842) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 843)
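/*
 * Open a regular file: sample the mount point, run the fscrypt and
 * fsverity open checks, attach the jbd2 inode for writable opens (used
 * for data tracking in ordered mode), and finish with the quota open
 * hook.
 */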
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 844) static int ext4_file_open(struct inode *inode, struct file *filp)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 845) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 846) int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 847)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 848) if (unlikely(ext4_forced_shutdown(EXT4_SB(inode->i_sb))))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 849) return -EIO;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 850)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 851) ret = ext4_sample_last_mounted(inode->i_sb, filp->f_path.mnt);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 852) if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 853) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 854)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 855) ret = fscrypt_file_open(inode, filp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 856) if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 857) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 858)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 859) ret = fsverity_file_open(inode, filp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 860) if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 861) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 862)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 863) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 864) * Set up the jbd2_inode if we are opening the inode for
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 865) * writing and the journal is present
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 866) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 867) if (filp->f_mode & FMODE_WRITE) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 868) ret = ext4_inode_attach_jinode(inode);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 869) if (ret < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 870) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 871) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 872)
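	/*
	 * FMODE_NOWAIT enables RWF_NOWAIT semantics for this file;
	 * FMODE_BUF_RASYNC advertises support for async buffered reads
	 * (used by io_uring).
	 */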
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 873) filp->f_mode |= FMODE_NOWAIT | FMODE_BUF_RASYNC;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 874) return dquot_file_open(inode, filp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 875) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 876)
/*
 * ext4_llseek() handles both block-mapped and extent-mapped files by
 * passing the appropriate maxbytes limit for each to
 * generic_file_llseek_size().  SEEK_HOLE and SEEK_DATA are resolved
 * through the iomap report ops instead, under the shared inode lock.
 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 882) loff_t ext4_llseek(struct file *file, loff_t offset, int whence)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 883) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 884) struct inode *inode = file->f_mapping->host;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 885) loff_t maxbytes;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 886)
	if (!ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 888) maxbytes = EXT4_SB(inode->i_sb)->s_bitmap_maxbytes;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 889) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 890) maxbytes = inode->i_sb->s_maxbytes;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 891)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 892) switch (whence) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 893) default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 894) return generic_file_llseek_size(file, offset, whence,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 895) maxbytes, i_size_read(inode));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 896) case SEEK_HOLE:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 897) inode_lock_shared(inode);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 898) offset = iomap_seek_hole(inode, offset,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 899) &ext4_iomap_report_ops);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 900) inode_unlock_shared(inode);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 901) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 902) case SEEK_DATA:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 903) inode_lock_shared(inode);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 904) offset = iomap_seek_data(inode, offset,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 905) &ext4_iomap_report_ops);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 906) inode_unlock_shared(inode);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 907) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 908) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 909)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 910) if (offset < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 911) return offset;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 912) return vfs_setpos(file, offset, maxbytes);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 913) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 914)
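/*
 * File operations for regular ext4 files.  Reads and writes go through
 * the iter-based entry points, which dispatch to the DAX, direct I/O
 * or buffered paths as appropriate; MAP_SYNC is advertised here but is
 * validated against the dax_device in ext4_file_mmap().
 */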
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 915) const struct file_operations ext4_file_operations = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 916) .llseek = ext4_llseek,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 917) .read_iter = ext4_file_read_iter,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 918) .write_iter = ext4_file_write_iter,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 919) .iopoll = iomap_dio_iopoll,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 920) .unlocked_ioctl = ext4_ioctl,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 921) #ifdef CONFIG_COMPAT
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 922) .compat_ioctl = ext4_compat_ioctl,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 923) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 924) .mmap = ext4_file_mmap,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 925) .mmap_supported_flags = MAP_SYNC,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 926) .open = ext4_file_open,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 927) .release = ext4_release_file,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 928) .fsync = ext4_sync_file,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 929) .get_unmapped_area = thp_get_unmapped_area,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 930) .splice_read = generic_file_splice_read,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 931) .splice_write = iter_file_splice_write,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 932) .fallocate = ext4_fallocate,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 933) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 934)
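/*
 * Inode operations for regular ext4 files: attribute get/set, extended
 * attribute listing, POSIX ACLs and FIEMAP extent mapping.
 */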
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 935) const struct inode_operations ext4_file_inode_operations = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 936) .setattr = ext4_setattr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 937) .getattr = ext4_file_getattr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 938) .listxattr = ext4_listxattr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 939) .get_acl = ext4_get_acl,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 940) .set_acl = ext4_set_acl,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 941) .fiemap = ext4_fiemap,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 942) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 943)