// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (c) 2016 Trond Myklebust
 * Copyright (c) 2019 Jeff Layton
 *
 * I/O and data path helper functionality.
 *
 * Heavily borrowed from equivalent code in fs/nfs/io.c
 */

#include <linux/ceph/ceph_debug.h>

#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/rwsem.h>
#include <linux/fs.h>

#include "super.h"
#include "io.h"

/* Call with exclusively locked inode->i_rwsem */
static void ceph_block_o_direct(struct ceph_inode_info *ci, struct inode *inode)
{
	lockdep_assert_held_write(&inode->i_rwsem);

	if (READ_ONCE(ci->i_ceph_flags) & CEPH_I_ODIRECT) {
		spin_lock(&ci->i_ceph_lock);
		ci->i_ceph_flags &= ~CEPH_I_ODIRECT;
		spin_unlock(&ci->i_ceph_lock);
		inode_dio_wait(inode);
	}
}

/**
 * ceph_start_io_read - declare the file is being used for buffered reads
 * @inode: file inode
 *
 * Declare that a buffered read operation is about to start, and ensure
 * that we block all direct I/O.
 * On exit, the function ensures that the CEPH_I_ODIRECT flag is unset,
 * and holds a shared lock on inode->i_rwsem to ensure that the flag
 * cannot be changed.
 * In practice, this means that buffered read operations are allowed to
 * execute in parallel, thanks to the shared lock, whereas direct I/O
 * operations need to wait to grab an exclusive lock in order to set
 * CEPH_I_ODIRECT.
 * Note that buffered writes and truncates both take a write lock on
 * inode->i_rwsem, meaning that those are serialised w.r.t. the reads.
 */
void
ceph_start_io_read(struct inode *inode)
{
	struct ceph_inode_info *ci = ceph_inode(inode);

	/* Be an optimist! */
	down_read(&inode->i_rwsem);
	if (!(READ_ONCE(ci->i_ceph_flags) & CEPH_I_ODIRECT))
		return;
	up_read(&inode->i_rwsem);
	/* Slow path.... */
	down_write(&inode->i_rwsem);
	ceph_block_o_direct(ci, inode);
	downgrade_write(&inode->i_rwsem);
}

/**
 * ceph_end_io_read - declare that the buffered read operation is done
 * @inode: file inode
 *
 * Declare that a buffered read operation is done, and release the shared
 * lock on inode->i_rwsem.
 */
void
ceph_end_io_read(struct inode *inode)
{
	up_read(&inode->i_rwsem);
}
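
/*
 * Illustrative sketch only (assumed caller, not part of the ceph API): a
 * buffered read path is expected to bracket the read with the helpers
 * above, in the spirit of ceph_read_iter() in fs/ceph/file.c.  The call to
 * generic_file_read_iter() is a stand-in for the real read path.
 */
static __maybe_unused ssize_t ceph_example_buffered_read(struct kiocb *iocb,
							  struct iov_iter *to)
{
	struct inode *inode = file_inode(iocb->ki_filp);
	ssize_t ret;

	/* Takes i_rwsem shared and guarantees CEPH_I_ODIRECT is clear. */
	ceph_start_io_read(inode);
	ret = generic_file_read_iter(iocb, to);
	/* Drops the shared lock; direct I/O may set the flag again. */
	ceph_end_io_read(inode);

	return ret;
}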

/**
 * ceph_start_io_write - declare the file is being used for buffered writes
 * @inode: file inode
 *
 * Declare that a buffered write operation is about to start, and ensure
 * that we block all direct I/O.
 */
void
ceph_start_io_write(struct inode *inode)
{
	down_write(&inode->i_rwsem);
	ceph_block_o_direct(ceph_inode(inode), inode);
}

/**
 * ceph_end_io_write - declare that the buffered write operation is done
 * @inode: file inode
 *
 * Declare that a buffered write operation is done, and release the
 * lock on inode->i_rwsem.
 */
void
ceph_end_io_write(struct inode *inode)
{
	up_write(&inode->i_rwsem);
}
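
/*
 * Illustrative sketch only (assumed caller, not part of the ceph API): a
 * buffered write path takes the exclusive lock via ceph_start_io_write(),
 * which also clears CEPH_I_ODIRECT and waits for in-flight direct I/O.
 * __generic_file_write_iter() stands in for the real ceph write path in
 * fs/ceph/file.c; it expects the caller to already hold i_rwsem.
 */
static __maybe_unused ssize_t ceph_example_buffered_write(struct kiocb *iocb,
							   struct iov_iter *from)
{
	struct inode *inode = file_inode(iocb->ki_filp);
	ssize_t ret;

	/* Exclusive i_rwsem: serialised against reads and direct I/O. */
	ceph_start_io_write(inode);
	ret = __generic_file_write_iter(iocb, from);
	ceph_end_io_write(inode);

	return ret;
}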

/* Call with exclusively locked inode->i_rwsem */
static void ceph_block_buffered(struct ceph_inode_info *ci, struct inode *inode)
{
	lockdep_assert_held_write(&inode->i_rwsem);

	if (!(READ_ONCE(ci->i_ceph_flags) & CEPH_I_ODIRECT)) {
		spin_lock(&ci->i_ceph_lock);
		ci->i_ceph_flags |= CEPH_I_ODIRECT;
		spin_unlock(&ci->i_ceph_lock);
		/* FIXME: unmap_mapping_range? */
		filemap_write_and_wait(inode->i_mapping);
	}
}

/**
 * ceph_start_io_direct - declare the file is being used for direct i/o
 * @inode: file inode
 *
 * Declare that a direct I/O operation is about to start, and ensure
 * that we block all buffered I/O.
 * On exit, the function ensures that the CEPH_I_ODIRECT flag is set,
 * and holds a shared lock on inode->i_rwsem to ensure that the flag
 * cannot be changed.
 * In practice, this means that direct I/O operations are allowed to
 * execute in parallel, thanks to the shared lock, whereas buffered I/O
 * operations need to wait to grab an exclusive lock in order to clear
 * CEPH_I_ODIRECT.
 * Note that buffered writes and truncates both take a write lock on
 * inode->i_rwsem, meaning that those are serialised w.r.t. O_DIRECT.
 */
void
ceph_start_io_direct(struct inode *inode)
{
	struct ceph_inode_info *ci = ceph_inode(inode);

	/* Be an optimist! */
	down_read(&inode->i_rwsem);
	if (READ_ONCE(ci->i_ceph_flags) & CEPH_I_ODIRECT)
		return;
	up_read(&inode->i_rwsem);
	/* Slow path.... */
	down_write(&inode->i_rwsem);
	ceph_block_buffered(ci, inode);
	downgrade_write(&inode->i_rwsem);
}

/**
 * ceph_end_io_direct - declare that the direct i/o operation is done
 * @inode: file inode
 *
 * Declare that a direct I/O operation is done, and release the shared
 * lock on inode->i_rwsem.
 */
void
ceph_end_io_direct(struct inode *inode)
{
	up_read(&inode->i_rwsem);
}
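
/*
 * Illustrative sketch only (assumed caller, not part of the ceph API): a
 * direct I/O path brackets the operation with ceph_start_io_direct() and
 * ceph_end_io_direct(), so concurrent O_DIRECT requests can share i_rwsem
 * while buffered I/O is held off.  The inode_dio_begin()/inode_dio_end()
 * accounting is what ceph_block_o_direct() waits on via inode_dio_wait().
 */
static __maybe_unused void ceph_example_direct_io(struct inode *inode)
{
	/* Shared i_rwsem, with CEPH_I_ODIRECT guaranteed to be set. */
	ceph_start_io_direct(inode);

	inode_dio_begin(inode);	/* make inode_dio_wait() block on this I/O */
	/* ... issue and wait for the O_DIRECT OSD requests here ... */
	inode_dio_end(inode);	/* wakes inode_dio_wait() when the count drops */

	ceph_end_io_direct(inode);
}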