// SPDX-License-Identifier: GPL-2.0
/*
 * High-level sync()-related operations
 */

#include <linux/kernel.h>
#include <linux/file.h>
#include <linux/fs.h>
#include <linux/slab.h>
#include <linux/export.h>
#include <linux/namei.h>
#include <linux/sched.h>
#include <linux/writeback.h>
#include <linux/syscalls.h>
#include <linux/linkage.h>
#include <linux/pagemap.h>
#include <linux/quotaops.h>
#include <linux/backing-dev.h>
#include "internal.h"

#define VALID_FLAGS (SYNC_FILE_RANGE_WAIT_BEFORE|SYNC_FILE_RANGE_WRITE| \
			SYNC_FILE_RANGE_WAIT_AFTER)

/*
 * Do the filesystem syncing work. For simple filesystems
 * writeback_inodes_sb(sb) just dirties the buffers containing inode data, so
 * we have to submit I/O for these buffers via __sync_blockdev(). This also
 * speeds up the wait == 1 case since in that case write_inode() functions do
 * sync_dirty_buffer() and thus effectively write one block at a time.
 */
static int __sync_filesystem(struct super_block *sb, int wait)
{
	if (wait)
		sync_inodes_sb(sb);
	else
		writeback_inodes_sb(sb, WB_REASON_SYNC);

	if (sb->s_op->sync_fs)
		sb->s_op->sync_fs(sb, wait);
	return __sync_blockdev(sb->s_bdev, wait);
}

/*
 * Write out and wait upon all dirty data associated with this
 * superblock. Filesystem data as well as the underlying block
 * device. The caller must already hold sb->s_umount; this function
 * only asserts that it does.
 */
int sync_filesystem(struct super_block *sb)
{
	int ret;

	/*
	 * We need to be protected against the filesystem going from
	 * r/o to r/w or vice versa.
	 */
	WARN_ON(!rwsem_is_locked(&sb->s_umount));

	/*
	 * No point in syncing out anything if the filesystem is read-only.
	 */
	if (sb_rdonly(sb))
		return 0;

	ret = __sync_filesystem(sb, 0);
	if (ret < 0)
		return ret;
	return __sync_filesystem(sb, 1);
}
EXPORT_SYMBOL(sync_filesystem);
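
/*
 * Illustrative sketch (not part of this file): how an in-kernel caller is
 * expected to invoke sync_filesystem(). The s_umount locking mirrors what
 * the WARN_ON() above assumes and what the syncfs(2) implementation below
 * actually does; "sb" and "ret" are just placeholder names.
 *
 *	down_read(&sb->s_umount);
 *	ret = sync_filesystem(sb);
 *	up_read(&sb->s_umount);
 */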

static void sync_inodes_one_sb(struct super_block *sb, void *arg)
{
	if (!sb_rdonly(sb))
		sync_inodes_sb(sb);
}

static void sync_fs_one_sb(struct super_block *sb, void *arg)
{
	if (!sb_rdonly(sb) && !(sb->s_iflags & SB_I_SKIP_SYNC) &&
	    sb->s_op->sync_fs)
		sb->s_op->sync_fs(sb, *(int *)arg);
}

static void fdatawrite_one_bdev(struct block_device *bdev, void *arg)
{
	filemap_fdatawrite(bdev->bd_inode->i_mapping);
}

static void fdatawait_one_bdev(struct block_device *bdev, void *arg)
{
	/*
	 * We keep the error status of individual mapping so that
	 * applications can catch the writeback error using fsync(2).
	 * See filemap_fdatawait_keep_errors() for details.
	 */
	filemap_fdatawait_keep_errors(bdev->bd_inode->i_mapping);
}

/*
 * Sync everything. We start by waking flusher threads so that most of
 * writeback runs on all devices in parallel. Then we sync all inodes reliably,
 * which effectively also waits for all flusher threads to finish doing
 * writeback. At this point all data is on disk so metadata should be stable
 * and we tell filesystems to sync their metadata via ->sync_fs() calls.
 * Finally, we write out all block devices because some filesystems (e.g. ext2)
 * just write metadata (such as inodes or bitmaps) to block device page cache
 * and do not sync it on their own in ->sync_fs().
 */
void ksys_sync(void)
{
	int nowait = 0, wait = 1;

	wakeup_flusher_threads(WB_REASON_SYNC);
	iterate_supers(sync_inodes_one_sb, NULL);
	iterate_supers(sync_fs_one_sb, &nowait);
	iterate_supers(sync_fs_one_sb, &wait);
	iterate_bdevs(fdatawrite_one_bdev, NULL);
	iterate_bdevs(fdatawait_one_bdev, NULL);
	if (unlikely(laptop_mode))
		laptop_sync_completion();
}

SYSCALL_DEFINE0(sync)
{
	ksys_sync();
	return 0;
}
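
/*
 * Illustrative userspace usage (not kernel code): as the syscall above
 * shows, sync(2) always returns 0, so writeback errors are not reported
 * through it. Callers that need to detect failures are better served by
 * syncfs(2) or fsync(2) on the files they care about. A minimal sketch:
 *
 *	#include <unistd.h>
 *
 *	sync();		// flush everything; there is no error to check
 */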

static void do_sync_work(struct work_struct *work)
{
	int nowait = 0;

	/*
	 * Sync twice to reduce the possibility we skipped some inodes / pages
	 * because they were temporarily locked
	 */
	iterate_supers(sync_inodes_one_sb, &nowait);
	iterate_supers(sync_fs_one_sb, &nowait);
	iterate_bdevs(fdatawrite_one_bdev, NULL);
	iterate_supers(sync_inodes_one_sb, &nowait);
	iterate_supers(sync_fs_one_sb, &nowait);
	iterate_bdevs(fdatawrite_one_bdev, NULL);
	printk("Emergency Sync complete\n");
	kfree(work);
}

void emergency_sync(void)
{
	struct work_struct *work;

	work = kmalloc(sizeof(*work), GFP_ATOMIC);
	if (work) {
		INIT_WORK(work, do_sync_work);
		schedule_work(work);
	}
}

/*
 * sync a single super
 */
SYSCALL_DEFINE1(syncfs, int, fd)
{
	struct fd f = fdget(fd);
	struct super_block *sb;
	int ret, ret2;

	if (!f.file)
		return -EBADF;
	sb = f.file->f_path.dentry->d_sb;

	down_read(&sb->s_umount);
	ret = sync_filesystem(sb);
	up_read(&sb->s_umount);

	ret2 = errseq_check_and_advance(&sb->s_wb_err, &f.file->f_sb_err);

	fdput(f);
	return ret ? ret : ret2;
}
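
/*
 * Illustrative userspace usage (not kernel code): syncfs(2) flushes the
 * filesystem backing an open fd and, thanks to the errseq sampling above,
 * also reports a writeback error this fd has not yet seen. A minimal
 * sketch; the mount point path is just an example:
 *
 *	#define _GNU_SOURCE
 *	#include <fcntl.h>
 *	#include <unistd.h>
 *
 *	int fd = open("/mnt/data", O_RDONLY | O_DIRECTORY);
 *	if (fd >= 0 && syncfs(fd) < 0)
 *		perror("syncfs");	// e.g. EIO after a failed writeback
 */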

/**
 * vfs_fsync_range - helper to sync a range of data & metadata to disk
 * @file: file to sync
 * @start: offset in bytes of the beginning of data range to sync
 * @end: offset in bytes of the end of data range (inclusive)
 * @datasync: perform only datasync
 *
 * Write back data in range @start..@end and metadata for @file to disk. If
 * @datasync is set only metadata needed to access modified file data is
 * written.
 */
int vfs_fsync_range(struct file *file, loff_t start, loff_t end, int datasync)
{
	struct inode *inode = file->f_mapping->host;

	if (!file->f_op->fsync)
		return -EINVAL;
	if (!datasync && (inode->i_state & I_DIRTY_TIME))
		mark_inode_dirty_sync(inode);
	return file->f_op->fsync(file, start, end, datasync);
}
EXPORT_SYMBOL(vfs_fsync_range);

/**
 * vfs_fsync - perform a fsync or fdatasync on a file
 * @file: file to sync
 * @datasync: only perform a fdatasync operation
 *
 * Write back data and metadata for @file to disk. If @datasync is
 * set only metadata needed to access modified file data is written.
 */
int vfs_fsync(struct file *file, int datasync)
{
	return vfs_fsync_range(file, 0, LLONG_MAX, datasync);
}
EXPORT_SYMBOL(vfs_fsync);
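
/*
 * Illustrative sketch (not part of this file): how in-kernel code that
 * holds a struct file might flush it through the exported helper above.
 * "file" and "err" are placeholder names.
 *
 *	int err = vfs_fsync(file, 0);	// 0 => full fsync semantics
 *	if (err)
 *		pr_warn("flush failed: %d\n", err);
 */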

static int do_fsync(unsigned int fd, int datasync)
{
	struct fd f = fdget(fd);
	int ret = -EBADF;

	if (f.file) {
		ret = vfs_fsync(f.file, datasync);
		fdput(f);
	}
	return ret;
}

SYSCALL_DEFINE1(fsync, unsigned int, fd)
{
	return do_fsync(fd, 0);
}

SYSCALL_DEFINE1(fdatasync, unsigned int, fd)
{
	return do_fsync(fd, 1);
}
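
/*
 * Illustrative userspace usage (not kernel code): fdatasync(2) may skip
 * metadata that is not needed to read back the data (e.g. timestamps),
 * while fsync(2) flushes all of it. A minimal sketch; "fd", "buf" and
 * "len" are placeholders:
 *
 *	#include <unistd.h>
 *
 *	if (write(fd, buf, len) < 0 || fdatasync(fd) < 0)
 *		perror("write/fdatasync");	// data not known to be durable
 */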

int sync_file_range(struct file *file, loff_t offset, loff_t nbytes,
		    unsigned int flags)
{
	int ret;
	struct address_space *mapping;
	loff_t endbyte;			/* inclusive */
	umode_t i_mode;

	ret = -EINVAL;
	if (flags & ~VALID_FLAGS)
		goto out;

	endbyte = offset + nbytes;

	if ((s64)offset < 0)
		goto out;
	if ((s64)endbyte < 0)
		goto out;
	if (endbyte < offset)
		goto out;

	if (sizeof(pgoff_t) == 4) {
		if (offset >= (0x100000000ULL << PAGE_SHIFT)) {
			/*
			 * The range starts outside a 32 bit machine's
			 * pagecache addressing capabilities. Let it "succeed"
			 */
			ret = 0;
			goto out;
		}
		if (endbyte >= (0x100000000ULL << PAGE_SHIFT)) {
			/*
			 * Out to EOF
			 */
			nbytes = 0;
		}
	}

	if (nbytes == 0)
		endbyte = LLONG_MAX;
	else
		endbyte--;		/* inclusive */

	i_mode = file_inode(file)->i_mode;
	ret = -ESPIPE;
	if (!S_ISREG(i_mode) && !S_ISBLK(i_mode) && !S_ISDIR(i_mode) &&
			!S_ISLNK(i_mode))
		goto out;

	mapping = file->f_mapping;
	ret = 0;
	if (flags & SYNC_FILE_RANGE_WAIT_BEFORE) {
		ret = file_fdatawait_range(file, offset, endbyte);
		if (ret < 0)
			goto out;
	}

	if (flags & SYNC_FILE_RANGE_WRITE) {
		int sync_mode = WB_SYNC_NONE;

		if ((flags & SYNC_FILE_RANGE_WRITE_AND_WAIT) ==
			     SYNC_FILE_RANGE_WRITE_AND_WAIT)
			sync_mode = WB_SYNC_ALL;

		ret = __filemap_fdatawrite_range(mapping, offset, endbyte,
						 sync_mode);
		if (ret < 0)
			goto out;
	}

	if (flags & SYNC_FILE_RANGE_WAIT_AFTER)
		ret = file_fdatawait_range(file, offset, endbyte);

out:
	return ret;
}

/*
 * ksys_sync_file_range() permits finely controlled syncing over a segment of
 * a file in the range offset .. (offset+nbytes-1) inclusive. If nbytes is
 * zero then ksys_sync_file_range() will operate from offset out to EOF.
 *
 * The flag bits are:
 *
 * SYNC_FILE_RANGE_WAIT_BEFORE: wait upon writeout of all pages in the range
 * before performing the write.
 *
 * SYNC_FILE_RANGE_WRITE: initiate writeout of all those dirty pages in the
 * range which are not presently under writeback. Note that this may block for
 * significant periods due to exhaustion of disk request structures.
 *
 * SYNC_FILE_RANGE_WAIT_AFTER: wait upon writeout of all pages in the range
 * after performing the write.
 *
 * Useful combinations of the flag bits are:
 *
 * SYNC_FILE_RANGE_WAIT_BEFORE|SYNC_FILE_RANGE_WRITE: ensures that all pages
 * in the range which were dirty on entry to ksys_sync_file_range() are placed
 * under writeout. This is a start-write-for-data-integrity operation.
 *
 * SYNC_FILE_RANGE_WRITE: start writeout of all dirty pages in the range which
 * are not presently under writeout. This is an asynchronous flush-to-disk
 * operation. Not suitable for data integrity operations.
 *
 * SYNC_FILE_RANGE_WAIT_BEFORE (or SYNC_FILE_RANGE_WAIT_AFTER): wait for
 * completion of writeout of all pages in the range. This will be used after an
 * earlier SYNC_FILE_RANGE_WAIT_BEFORE|SYNC_FILE_RANGE_WRITE operation to wait
 * for that operation to complete and to return the result.
 *
 * SYNC_FILE_RANGE_WAIT_BEFORE|SYNC_FILE_RANGE_WRITE|SYNC_FILE_RANGE_WAIT_AFTER
 * (a.k.a. SYNC_FILE_RANGE_WRITE_AND_WAIT):
 * a traditional sync() operation. This is a write-for-data-integrity operation
 * which will ensure that all pages in the range which were dirty on entry to
 * ksys_sync_file_range() are written to disk. It should be noted that disk
 * caches are not flushed by this call, so there are no guarantees here that the
 * data will be available on disk after a crash.
 *
 * SYNC_FILE_RANGE_WAIT_BEFORE and SYNC_FILE_RANGE_WAIT_AFTER will detect any
 * I/O errors or ENOSPC conditions and will return those to the caller, after
 * clearing the EIO and ENOSPC flags in the address_space.
 *
 * It should be noted that none of these operations write out the file's
 * metadata. So unless the application is strictly performing overwrites of
 * already-instantiated disk blocks, there are no guarantees here that the data
 * will be available after a crash.
 */
int ksys_sync_file_range(int fd, loff_t offset, loff_t nbytes,
			 unsigned int flags)
{
	int ret;
	struct fd f;

	ret = -EBADF;
	f = fdget(fd);
	if (f.file)
		ret = sync_file_range(f.file, offset, nbytes, flags);

	fdput(f);
	return ret;
}

SYSCALL_DEFINE4(sync_file_range, int, fd, loff_t, offset, loff_t, nbytes,
		unsigned int, flags)
{
	return ksys_sync_file_range(fd, offset, nbytes, flags);
}
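
/*
 * Illustrative userspace usage (not kernel code): one way to combine the
 * flags documented above for a simple write-behind scheme - kick off
 * asynchronous writeout of the chunk just written, then wait on the
 * previous chunk so dirty pages do not accumulate. "fd", "off" and
 * "CHUNK" are placeholders.
 *
 *	#define _GNU_SOURCE
 *	#include <fcntl.h>
 *
 *	sync_file_range(fd, off, CHUNK, SYNC_FILE_RANGE_WRITE);
 *	sync_file_range(fd, off - CHUNK, CHUNK,
 *			SYNC_FILE_RANGE_WAIT_BEFORE |
 *			SYNC_FILE_RANGE_WRITE |
 *			SYNC_FILE_RANGE_WAIT_AFTER);
 */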

/*
 * It would be nice if people remembered that not all the world's an i386
 * when they introduce new system calls.
 */
SYSCALL_DEFINE4(sync_file_range2, int, fd, unsigned int, flags,
		loff_t, offset, loff_t, nbytes)
{
	return ksys_sync_file_range(fd, offset, nbytes, flags);
}