// SPDX-License-Identifier: GPL-2.0
/*
 * fs/f2fs/super.c
 *
 * Copyright (c) 2012 Samsung Electronics Co., Ltd.
 *             http://www.samsung.com/
 */
#include <linux/module.h>
#include <linux/init.h>
#include <linux/fs.h>
#include <linux/statfs.h>
#include <linux/buffer_head.h>
#include <linux/backing-dev.h>
#include <linux/kthread.h>
#include <linux/parser.h>
#include <linux/mount.h>
#include <linux/seq_file.h>
#include <linux/proc_fs.h>
#include <linux/random.h>
#include <linux/exportfs.h>
#include <linux/blkdev.h>
#include <linux/quotaops.h>
#include <linux/f2fs_fs.h>
#include <linux/sysfs.h>
#include <linux/quota.h>
#include <linux/unicode.h>
#include <linux/part_stat.h>
#include <linux/zstd.h>
#include <linux/lz4.h>

#include "f2fs.h"
#include "node.h"
#include "segment.h"
#include "xattr.h"
#include "gc.h"

#define CREATE_TRACE_POINTS
#include <trace/events/f2fs.h>

static struct kmem_cache *f2fs_inode_cachep;

#ifdef CONFIG_F2FS_FAULT_INJECTION

const char *f2fs_fault_name[FAULT_MAX] = {
	[FAULT_KMALLOC] = "kmalloc",
	[FAULT_KVMALLOC] = "kvmalloc",
	[FAULT_PAGE_ALLOC] = "page alloc",
	[FAULT_PAGE_GET] = "page get",
	[FAULT_ALLOC_NID] = "alloc nid",
	[FAULT_ORPHAN] = "orphan",
	[FAULT_BLOCK] = "no more block",
	[FAULT_DIR_DEPTH] = "too big dir depth",
	[FAULT_EVICT_INODE] = "evict_inode fail",
	[FAULT_TRUNCATE] = "truncate fail",
	[FAULT_READ_IO] = "read IO error",
	[FAULT_CHECKPOINT] = "checkpoint error",
	[FAULT_DISCARD] = "discard error",
	[FAULT_WRITE_IO] = "write IO error",
};

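/*
 * Update the fault-injection attributes for this mount: a non-zero @rate
 * resets the injected-op counter and sets the injection rate, a non-zero
 * @type selects which fault types may be injected, and passing zero for
 * both clears the whole fault_info.
 */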
void f2fs_build_fault_attr(struct f2fs_sb_info *sbi, unsigned int rate,
							unsigned int type)
{
	struct f2fs_fault_info *ffi = &F2FS_OPTION(sbi).fault_info;

	if (rate) {
		atomic_set(&ffi->inject_ops, 0);
		ffi->inject_rate = rate;
	}

	if (type)
		ffi->inject_type = type;

	if (!rate && !type)
		memset(ffi, 0, sizeof(struct f2fs_fault_info));
}
#endif

/* f2fs-wide shrinker description */
static struct shrinker f2fs_shrinker_info = {
	.scan_objects = f2fs_shrink_scan,
	.count_objects = f2fs_shrink_count,
	.seeks = DEFAULT_SEEKS,
};

enum {
	Opt_gc_background,
	Opt_disable_roll_forward,
	Opt_norecovery,
	Opt_discard,
	Opt_nodiscard,
	Opt_noheap,
	Opt_heap,
	Opt_user_xattr,
	Opt_nouser_xattr,
	Opt_acl,
	Opt_noacl,
	Opt_active_logs,
	Opt_disable_ext_identify,
	Opt_inline_xattr,
	Opt_noinline_xattr,
	Opt_inline_xattr_size,
	Opt_inline_data,
	Opt_inline_dentry,
	Opt_noinline_dentry,
	Opt_flush_merge,
	Opt_noflush_merge,
	Opt_nobarrier,
	Opt_fastboot,
	Opt_extent_cache,
	Opt_noextent_cache,
	Opt_noinline_data,
	Opt_data_flush,
	Opt_reserve_root,
	Opt_resgid,
	Opt_resuid,
	Opt_mode,
	Opt_io_size_bits,
	Opt_fault_injection,
	Opt_fault_type,
	Opt_lazytime,
	Opt_nolazytime,
	Opt_quota,
	Opt_noquota,
	Opt_usrquota,
	Opt_grpquota,
	Opt_prjquota,
	Opt_usrjquota,
	Opt_grpjquota,
	Opt_prjjquota,
	Opt_offusrjquota,
	Opt_offgrpjquota,
	Opt_offprjjquota,
	Opt_jqfmt_vfsold,
	Opt_jqfmt_vfsv0,
	Opt_jqfmt_vfsv1,
	Opt_whint,
	Opt_alloc,
	Opt_fsync,
	Opt_test_dummy_encryption,
	Opt_inlinecrypt,
	Opt_checkpoint_disable,
	Opt_checkpoint_disable_cap,
	Opt_checkpoint_disable_cap_perc,
	Opt_checkpoint_enable,
	Opt_checkpoint_merge,
	Opt_nocheckpoint_merge,
	Opt_compress_algorithm,
	Opt_compress_log_size,
	Opt_compress_extension,
	Opt_compress_chksum,
	Opt_compress_mode,
	Opt_compress_cache,
	Opt_atgc,
	Opt_gc_merge,
	Opt_nogc_merge,
	Opt_err,
};

static match_table_t f2fs_tokens = {
	{Opt_gc_background, "background_gc=%s"},
	{Opt_disable_roll_forward, "disable_roll_forward"},
	{Opt_norecovery, "norecovery"},
	{Opt_discard, "discard"},
	{Opt_nodiscard, "nodiscard"},
	{Opt_noheap, "no_heap"},
	{Opt_heap, "heap"},
	{Opt_user_xattr, "user_xattr"},
	{Opt_nouser_xattr, "nouser_xattr"},
	{Opt_acl, "acl"},
	{Opt_noacl, "noacl"},
	{Opt_active_logs, "active_logs=%u"},
	{Opt_disable_ext_identify, "disable_ext_identify"},
	{Opt_inline_xattr, "inline_xattr"},
	{Opt_noinline_xattr, "noinline_xattr"},
	{Opt_inline_xattr_size, "inline_xattr_size=%u"},
	{Opt_inline_data, "inline_data"},
	{Opt_inline_dentry, "inline_dentry"},
	{Opt_noinline_dentry, "noinline_dentry"},
	{Opt_flush_merge, "flush_merge"},
	{Opt_noflush_merge, "noflush_merge"},
	{Opt_nobarrier, "nobarrier"},
	{Opt_fastboot, "fastboot"},
	{Opt_extent_cache, "extent_cache"},
	{Opt_noextent_cache, "noextent_cache"},
	{Opt_noinline_data, "noinline_data"},
	{Opt_data_flush, "data_flush"},
	{Opt_reserve_root, "reserve_root=%u"},
	{Opt_resgid, "resgid=%u"},
	{Opt_resuid, "resuid=%u"},
	{Opt_mode, "mode=%s"},
	{Opt_io_size_bits, "io_bits=%u"},
	{Opt_fault_injection, "fault_injection=%u"},
	{Opt_fault_type, "fault_type=%u"},
	{Opt_lazytime, "lazytime"},
	{Opt_nolazytime, "nolazytime"},
	{Opt_quota, "quota"},
	{Opt_noquota, "noquota"},
	{Opt_usrquota, "usrquota"},
	{Opt_grpquota, "grpquota"},
	{Opt_prjquota, "prjquota"},
	{Opt_usrjquota, "usrjquota=%s"},
	{Opt_grpjquota, "grpjquota=%s"},
	{Opt_prjjquota, "prjjquota=%s"},
	{Opt_offusrjquota, "usrjquota="},
	{Opt_offgrpjquota, "grpjquota="},
	{Opt_offprjjquota, "prjjquota="},
	{Opt_jqfmt_vfsold, "jqfmt=vfsold"},
	{Opt_jqfmt_vfsv0, "jqfmt=vfsv0"},
	{Opt_jqfmt_vfsv1, "jqfmt=vfsv1"},
	{Opt_whint, "whint_mode=%s"},
	{Opt_alloc, "alloc_mode=%s"},
	{Opt_fsync, "fsync_mode=%s"},
	{Opt_test_dummy_encryption, "test_dummy_encryption=%s"},
	{Opt_test_dummy_encryption, "test_dummy_encryption"},
	{Opt_inlinecrypt, "inlinecrypt"},
	{Opt_checkpoint_disable, "checkpoint=disable"},
	{Opt_checkpoint_disable_cap, "checkpoint=disable:%u"},
	{Opt_checkpoint_disable_cap_perc, "checkpoint=disable:%u%%"},
	{Opt_checkpoint_enable, "checkpoint=enable"},
	{Opt_checkpoint_merge, "checkpoint_merge"},
	{Opt_nocheckpoint_merge, "nocheckpoint_merge"},
	{Opt_compress_algorithm, "compress_algorithm=%s"},
	{Opt_compress_log_size, "compress_log_size=%u"},
	{Opt_compress_extension, "compress_extension=%s"},
	{Opt_compress_chksum, "compress_chksum"},
	{Opt_compress_mode, "compress_mode=%s"},
	{Opt_compress_cache, "compress_cache"},
	{Opt_atgc, "atgc"},
	{Opt_gc_merge, "gc_merge"},
	{Opt_nogc_merge, "nogc_merge"},
	{Opt_err, NULL},
};
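/*
 * The option grammar above is consumed by parse_options() below.  An
 * illustrative example only (availability of individual options depends on
 * the kernel configuration):
 *
 *	mount -t f2fs -o background_gc=sync,mode=lfs,reserve_root=32768 \
 *		/dev/sdX /mnt
 */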

void f2fs_printk(struct f2fs_sb_info *sbi, const char *fmt, ...)
{
	struct va_format vaf;
	va_list args;
	int level;

	va_start(args, fmt);

	level = printk_get_level(fmt);
	vaf.fmt = printk_skip_level(fmt);
	vaf.va = &args;
	printk("%c%cF2FS-fs (%s): %pV\n",
		KERN_SOH_ASCII, level, sbi->sb->s_id, &vaf);

	va_end(args);
}

#ifdef CONFIG_UNICODE
static const struct f2fs_sb_encodings {
	__u16 magic;
	char *name;
	char *version;
} f2fs_sb_encoding_map[] = {
	{F2FS_ENC_UTF8_12_1, "utf8", "12.1.0"},
};

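/*
 * Look up the casefolding encoding recorded in the on-disk superblock.
 * Returns -EINVAL when sb->s_encoding does not match any entry in
 * f2fs_sb_encoding_map above.
 */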
static int f2fs_sb_read_encoding(const struct f2fs_super_block *sb,
				const struct f2fs_sb_encodings **encoding,
				__u16 *flags)
{
	__u16 magic = le16_to_cpu(sb->s_encoding);
	int i;

	for (i = 0; i < ARRAY_SIZE(f2fs_sb_encoding_map); i++)
		if (magic == f2fs_sb_encoding_map[i].magic)
			break;

	if (i >= ARRAY_SIZE(f2fs_sb_encoding_map))
		return -EINVAL;

	*encoding = &f2fs_sb_encoding_map[i];
	*flags = le16_to_cpu(sb->s_encoding_flags);

	return 0;
}

struct kmem_cache *f2fs_cf_name_slab;
static int __init f2fs_create_casefold_cache(void)
{
	f2fs_cf_name_slab = f2fs_kmem_cache_create("f2fs_casefolded_name",
							F2FS_NAME_LEN);
	if (!f2fs_cf_name_slab)
		return -ENOMEM;
	return 0;
}

static void f2fs_destroy_casefold_cache(void)
{
	kmem_cache_destroy(f2fs_cf_name_slab);
}
#else
static int __init f2fs_create_casefold_cache(void) { return 0; }
static void f2fs_destroy_casefold_cache(void) { }
#endif

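/*
 * Clamp the root-reserved block count requested via reserve_root= to at
 * most 0.2% of the user blocks, and warn when resuid=/resgid= were given
 * without reserve_root so the caller knows they are being ignored.
 */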
static inline void limit_reserve_root(struct f2fs_sb_info *sbi)
{
	block_t limit = min((sbi->user_block_count << 1) / 1000,
			sbi->user_block_count - sbi->reserved_blocks);

	/* limit is 0.2% */
	if (test_opt(sbi, RESERVE_ROOT) &&
			F2FS_OPTION(sbi).root_reserved_blocks > limit &&
			F2FS_OPTION(sbi).root_reserved_blocks > MIN_ROOT_RESERVED_BLOCKS) {
		F2FS_OPTION(sbi).root_reserved_blocks = limit;
		f2fs_info(sbi, "Reduce reserved blocks for root = %u",
			  F2FS_OPTION(sbi).root_reserved_blocks);
	}
	if (!test_opt(sbi, RESERVE_ROOT) &&
		(!uid_eq(F2FS_OPTION(sbi).s_resuid,
				make_kuid(&init_user_ns, F2FS_DEF_RESUID)) ||
		!gid_eq(F2FS_OPTION(sbi).s_resgid,
				make_kgid(&init_user_ns, F2FS_DEF_RESGID))))
		f2fs_info(sbi, "Ignore s_resuid=%u, s_resgid=%u w/o reserve_root",
			  from_kuid_munged(&init_user_ns,
					   F2FS_OPTION(sbi).s_resuid),
			  from_kgid_munged(&init_user_ns,
					   F2FS_OPTION(sbi).s_resgid));
}

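/*
 * For IO-aligned mounts, work out how many extra segments must be kept in
 * reserve so that migrating one section in the worst case still has enough
 * free space, and record the result in SM_I(sbi)->additional_reserved_segments.
 * Returns -ENOSPC when that extra reservation exceeds the available user
 * blocks.
 */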
static inline int adjust_reserved_segment(struct f2fs_sb_info *sbi)
{
	unsigned int sec_blks = sbi->blocks_per_seg * sbi->segs_per_sec;
	unsigned int avg_vblocks;
	unsigned int wanted_reserved_segments;
	block_t avail_user_block_count;

	if (!F2FS_IO_ALIGNED(sbi))
		return 0;

	/* average valid block count in section in worst case */
	avg_vblocks = sec_blks / F2FS_IO_SIZE(sbi);

	/*
	 * we need enough free space when migrating one section in worst case
	 */
	wanted_reserved_segments = (F2FS_IO_SIZE(sbi) / avg_vblocks) *
						reserved_segments(sbi);
	wanted_reserved_segments -= reserved_segments(sbi);

	avail_user_block_count = sbi->user_block_count -
				sbi->current_reserved_blocks -
				F2FS_OPTION(sbi).root_reserved_blocks;

	if (wanted_reserved_segments * sbi->blocks_per_seg >
					avail_user_block_count) {
		f2fs_err(sbi, "IO align feature can't grab additional reserved segment: %u, available segments: %u",
			wanted_reserved_segments,
			avail_user_block_count >> sbi->log_blocks_per_seg);
		return -ENOSPC;
	}

	SM_I(sbi)->additional_reserved_segments = wanted_reserved_segments;

	f2fs_info(sbi, "IO align feature needs additional reserved segment: %u",
		 wanted_reserved_segments);

	return 0;
}

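/*
 * Recompute the absolute unusable_cap value from the percentage given with
 * checkpoint=disable:<n>%% against the current user_block_count.
 */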
static inline void adjust_unusable_cap_perc(struct f2fs_sb_info *sbi)
{
	if (!F2FS_OPTION(sbi).unusable_cap_perc)
		return;

	if (F2FS_OPTION(sbi).unusable_cap_perc == 100)
		F2FS_OPTION(sbi).unusable_cap = sbi->user_block_count;
	else
		F2FS_OPTION(sbi).unusable_cap = (sbi->user_block_count / 100) *
					F2FS_OPTION(sbi).unusable_cap_perc;

	f2fs_info(sbi, "Adjust unusable cap for checkpoint=disable = %u / %u%%",
			F2FS_OPTION(sbi).unusable_cap,
			F2FS_OPTION(sbi).unusable_cap_perc);
}

static void init_once(void *foo)
{
	struct f2fs_inode_info *fi = (struct f2fs_inode_info *) foo;

	inode_init_once(&fi->vfs_inode);
}

#ifdef CONFIG_QUOTA
static const char * const quotatypes[] = INITQFNAMES;
#define QTYPE2NAME(t) (quotatypes[t])
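/*
 * Record the journaled quota file name for @qtype (usrjquota=, grpjquota=,
 * prjjquota=).  The name cannot be changed while quota is already loaded,
 * it is ignored when the quota_ino feature provides hidden quota files, and
 * the file must live in the filesystem root directory.
 */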
static int f2fs_set_qf_name(struct super_block *sb, int qtype,
							substring_t *args)
{
	struct f2fs_sb_info *sbi = F2FS_SB(sb);
	char *qname;
	int ret = -EINVAL;

	if (sb_any_quota_loaded(sb) && !F2FS_OPTION(sbi).s_qf_names[qtype]) {
		f2fs_err(sbi, "Cannot change journaled quota options when quota turned on");
		return -EINVAL;
	}
	if (f2fs_sb_has_quota_ino(sbi)) {
		f2fs_info(sbi, "QUOTA feature is enabled, so ignore qf_name");
		return 0;
	}

	qname = match_strdup(args);
	if (!qname) {
		f2fs_err(sbi, "Not enough memory for storing quotafile name");
		return -ENOMEM;
	}
	if (F2FS_OPTION(sbi).s_qf_names[qtype]) {
		if (strcmp(F2FS_OPTION(sbi).s_qf_names[qtype], qname) == 0)
			ret = 0;
		else
			f2fs_err(sbi, "%s quota file already specified",
				 QTYPE2NAME(qtype));
		goto errout;
	}
	if (strchr(qname, '/')) {
		f2fs_err(sbi, "quotafile must be on filesystem root");
		goto errout;
	}
	F2FS_OPTION(sbi).s_qf_names[qtype] = qname;
	set_opt(sbi, QUOTA);
	return 0;
errout:
	kfree(qname);
	return ret;
}

static int f2fs_clear_qf_name(struct super_block *sb, int qtype)
{
	struct f2fs_sb_info *sbi = F2FS_SB(sb);

	if (sb_any_quota_loaded(sb) && F2FS_OPTION(sbi).s_qf_names[qtype]) {
		f2fs_err(sbi, "Cannot change journaled quota options when quota turned on");
		return -EINVAL;
	}
	kfree(F2FS_OPTION(sbi).s_qf_names[qtype]);
	F2FS_OPTION(sbi).s_qf_names[qtype] = NULL;
	return 0;
}

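/*
 * Sanity-check the quota mount options after parsing: project quota needs
 * the on-disk feature, plain usrquota/grpquota/prjquota options are dropped
 * when a matching journaled quota file is set (and mixing the two styles
 * otherwise is rejected), a journaled quota format must be specified, and
 * jquota_fmt is ignored when the quota_ino feature is enabled.
 */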
static int f2fs_check_quota_options(struct f2fs_sb_info *sbi)
{
	/*
	 * We do the test below only for project quotas. 'usrquota' and
	 * 'grpquota' mount options are allowed even without quota feature
	 * to support legacy quotas in quota files.
	 */
	if (test_opt(sbi, PRJQUOTA) && !f2fs_sb_has_project_quota(sbi)) {
		f2fs_err(sbi, "Project quota feature not enabled. Cannot enable project quota enforcement.");
		return -1;
	}
	if (F2FS_OPTION(sbi).s_qf_names[USRQUOTA] ||
			F2FS_OPTION(sbi).s_qf_names[GRPQUOTA] ||
			F2FS_OPTION(sbi).s_qf_names[PRJQUOTA]) {
		if (test_opt(sbi, USRQUOTA) &&
				F2FS_OPTION(sbi).s_qf_names[USRQUOTA])
			clear_opt(sbi, USRQUOTA);

		if (test_opt(sbi, GRPQUOTA) &&
				F2FS_OPTION(sbi).s_qf_names[GRPQUOTA])
			clear_opt(sbi, GRPQUOTA);

		if (test_opt(sbi, PRJQUOTA) &&
				F2FS_OPTION(sbi).s_qf_names[PRJQUOTA])
			clear_opt(sbi, PRJQUOTA);

		if (test_opt(sbi, GRPQUOTA) || test_opt(sbi, USRQUOTA) ||
				test_opt(sbi, PRJQUOTA)) {
			f2fs_err(sbi, "old and new quota format mixing");
			return -1;
		}

		if (!F2FS_OPTION(sbi).s_jquota_fmt) {
			f2fs_err(sbi, "journaled quota format not specified");
			return -1;
		}
	}

	if (f2fs_sb_has_quota_ino(sbi) && F2FS_OPTION(sbi).s_jquota_fmt) {
		f2fs_info(sbi, "QUOTA feature is enabled, so ignore jquota_fmt");
		F2FS_OPTION(sbi).s_jquota_fmt = 0;
	}
	return 0;
}
#endif

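/*
 * Handle the test_dummy_encryption[=%s] option: it requires the encrypt
 * feature and, on remount, is only accepted when it does not change the
 * dummy encryption policy that is already in effect.
 */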
static int f2fs_set_test_dummy_encryption(struct super_block *sb,
					const char *opt,
					const substring_t *arg,
					bool is_remount)
{
	struct f2fs_sb_info *sbi = F2FS_SB(sb);
#ifdef CONFIG_FS_ENCRYPTION
	int err;

	if (!f2fs_sb_has_encrypt(sbi)) {
		f2fs_err(sbi, "Encrypt feature is off");
		return -EINVAL;
	}

	/*
	 * This mount option is just for testing, and it's not worthwhile to
	 * implement the extra complexity (e.g. RCU protection) that would be
	 * needed to allow it to be set or changed during remount.  We do allow
	 * it to be specified during remount, but only if there is no change.
	 */
	if (is_remount && !F2FS_OPTION(sbi).dummy_enc_policy.policy) {
		f2fs_warn(sbi, "Can't set test_dummy_encryption on remount");
		return -EINVAL;
	}
	err = fscrypt_set_test_dummy_encryption(
		sb, arg->from, &F2FS_OPTION(sbi).dummy_enc_policy);
	if (err) {
		if (err == -EEXIST)
			f2fs_warn(sbi,
				  "Can't change test_dummy_encryption on remount");
		else if (err == -EINVAL)
			f2fs_warn(sbi, "Value of option \"%s\" is unrecognized",
				  opt);
		else
			f2fs_warn(sbi, "Error processing option \"%s\" [%d]",
				  opt, err);
		return -EINVAL;
	}
	f2fs_warn(sbi, "Test dummy encryption mode enabled");
#else
	f2fs_warn(sbi, "Test dummy encryption mount option ignored");
#endif
	return 0;
}

#ifdef CONFIG_F2FS_FS_COMPRESSION
#ifdef CONFIG_F2FS_FS_LZ4
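/*
 * Parse the optional ":<level>" suffix of compress_algorithm=lz4.  A bare
 * "lz4" (strlen == 3) keeps the default level 0; with CONFIG_F2FS_FS_LZ4HC
 * an explicit level must fall within [LZ4HC_MIN_CLEVEL, LZ4HC_MAX_CLEVEL].
 */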
static int f2fs_set_lz4hc_level(struct f2fs_sb_info *sbi, const char *str)
{
#ifdef CONFIG_F2FS_FS_LZ4HC
	unsigned int level;
#endif

	if (strlen(str) == 3) {
		F2FS_OPTION(sbi).compress_level = 0;
		return 0;
	}

#ifdef CONFIG_F2FS_FS_LZ4HC
	str += 3;

	if (str[0] != ':') {
		f2fs_info(sbi, "wrong format, e.g. <alg_name>:<compr_level>");
		return -EINVAL;
	}
	if (kstrtouint(str + 1, 10, &level))
		return -EINVAL;

	if (level < LZ4HC_MIN_CLEVEL || level > LZ4HC_MAX_CLEVEL) {
		f2fs_info(sbi, "invalid lz4hc compress level: %d", level);
		return -EINVAL;
	}

	F2FS_OPTION(sbi).compress_level = level;
	return 0;
#else
	f2fs_info(sbi, "kernel doesn't support lz4hc compression");
	return -EINVAL;
#endif
}
#endif

#ifdef CONFIG_F2FS_FS_ZSTD
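/*
 * Same idea for compress_algorithm=zstd: a bare "zstd" (strlen == 4) keeps
 * the default level 0, otherwise the level must be 1..ZSTD_maxCLevel().
 */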
static int f2fs_set_zstd_level(struct f2fs_sb_info *sbi, const char *str)
{
	unsigned int level;
	int len = 4;

	if (strlen(str) == len) {
		F2FS_OPTION(sbi).compress_level = 0;
		return 0;
	}

	str += len;

	if (str[0] != ':') {
		f2fs_info(sbi, "wrong format, e.g. <alg_name>:<compr_level>");
		return -EINVAL;
	}
	if (kstrtouint(str + 1, 10, &level))
		return -EINVAL;

	if (!level || level > ZSTD_maxCLevel()) {
		f2fs_info(sbi, "invalid zstd compress level: %d", level);
		return -EINVAL;
	}

	F2FS_OPTION(sbi).compress_level = level;
	return 0;
}
#endif
#endif

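/*
 * Walk the comma-separated mount option string, match every token against
 * f2fs_tokens and apply it to F2FS_OPTION(sbi) / sb->s_flags.  Used both
 * at mount time and on remount (@is_remount).
 */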
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 604) static int parse_options(struct super_block *sb, char *options, bool is_remount)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 605) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 606) struct f2fs_sb_info *sbi = F2FS_SB(sb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 607) substring_t args[MAX_OPT_ARGS];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 608) #ifdef CONFIG_F2FS_FS_COMPRESSION
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 609) unsigned char (*ext)[F2FS_EXTENSION_LEN];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 610) int ext_cnt;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 611) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 612) char *p, *name;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 613) int arg = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 614) kuid_t uid;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 615) kgid_t gid;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 616) int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 617)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 618) if (!options)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 619) goto default_check;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 620)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 621) while ((p = strsep(&options, ",")) != NULL) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 622) int token;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 623)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 624) if (!*p)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 625) continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 626) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 627) * Initialize args struct so we know whether arg was
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 628) * found; some options take optional arguments.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 629) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 630) args[0].to = args[0].from = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 631) token = match_token(p, f2fs_tokens, args);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 632)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 633) switch (token) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 634) case Opt_gc_background:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 635) name = match_strdup(&args[0]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 636)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 637) if (!name)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 638) return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 639) if (!strcmp(name, "on")) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 640) F2FS_OPTION(sbi).bggc_mode = BGGC_MODE_ON;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 641) } else if (!strcmp(name, "off")) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 642) F2FS_OPTION(sbi).bggc_mode = BGGC_MODE_OFF;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 643) } else if (!strcmp(name, "sync")) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 644) F2FS_OPTION(sbi).bggc_mode = BGGC_MODE_SYNC;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 645) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 646) kfree(name);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 647) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 648) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 649) kfree(name);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 650) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 651) case Opt_disable_roll_forward:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 652) set_opt(sbi, DISABLE_ROLL_FORWARD);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 653) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 654) case Opt_norecovery:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 655) /* this option mounts f2fs with ro */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 656) set_opt(sbi, NORECOVERY);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 657) if (!f2fs_readonly(sb))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 658) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 659) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 660) case Opt_discard:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 661) set_opt(sbi, DISCARD);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 662) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 663) case Opt_nodiscard:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 664) if (f2fs_sb_has_blkzoned(sbi)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 665) f2fs_warn(sbi, "discard is required for zoned block devices");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 666) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 667) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 668) clear_opt(sbi, DISCARD);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 669) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 670) case Opt_noheap:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 671) set_opt(sbi, NOHEAP);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 672) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 673) case Opt_heap:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 674) clear_opt(sbi, NOHEAP);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 675) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 676) #ifdef CONFIG_F2FS_FS_XATTR
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 677) case Opt_user_xattr:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 678) set_opt(sbi, XATTR_USER);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 679) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 680) case Opt_nouser_xattr:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 681) clear_opt(sbi, XATTR_USER);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 682) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 683) case Opt_inline_xattr:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 684) set_opt(sbi, INLINE_XATTR);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 685) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 686) case Opt_noinline_xattr:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 687) clear_opt(sbi, INLINE_XATTR);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 688) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 689) case Opt_inline_xattr_size:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 690) if (args->from && match_int(args, &arg))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 691) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 692) set_opt(sbi, INLINE_XATTR_SIZE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 693) F2FS_OPTION(sbi).inline_xattr_size = arg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 694) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 695) #else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 696) case Opt_user_xattr:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 697) f2fs_info(sbi, "user_xattr options not supported");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 698) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 699) case Opt_nouser_xattr:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 700) f2fs_info(sbi, "nouser_xattr options not supported");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 701) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 702) case Opt_inline_xattr:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 703) f2fs_info(sbi, "inline_xattr options not supported");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 704) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 705) case Opt_noinline_xattr:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 706) f2fs_info(sbi, "noinline_xattr options not supported");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 707) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 708) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 709) #ifdef CONFIG_F2FS_FS_POSIX_ACL
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 710) case Opt_acl:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 711) set_opt(sbi, POSIX_ACL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 712) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 713) case Opt_noacl:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 714) clear_opt(sbi, POSIX_ACL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 715) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 716) #else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 717) case Opt_acl:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 718) f2fs_info(sbi, "acl options not supported");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 719) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 720) case Opt_noacl:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 721) f2fs_info(sbi, "noacl options not supported");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 722) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 723) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 724) case Opt_active_logs:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 725) if (args->from && match_int(args, &arg))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 726) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 727) if (arg != 2 && arg != 4 &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 728) arg != NR_CURSEG_PERSIST_TYPE)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 729) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 730) F2FS_OPTION(sbi).active_logs = arg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 731) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 732) case Opt_disable_ext_identify:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 733) set_opt(sbi, DISABLE_EXT_IDENTIFY);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 734) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 735) case Opt_inline_data:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 736) set_opt(sbi, INLINE_DATA);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 737) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 738) case Opt_inline_dentry:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 739) set_opt(sbi, INLINE_DENTRY);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 740) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 741) case Opt_noinline_dentry:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 742) clear_opt(sbi, INLINE_DENTRY);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 743) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 744) case Opt_flush_merge:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 745) set_opt(sbi, FLUSH_MERGE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 746) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 747) case Opt_noflush_merge:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 748) clear_opt(sbi, FLUSH_MERGE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 749) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 750) case Opt_nobarrier:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 751) set_opt(sbi, NOBARRIER);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 752) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 753) case Opt_fastboot:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 754) set_opt(sbi, FASTBOOT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 755) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 756) case Opt_extent_cache:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 757) set_opt(sbi, EXTENT_CACHE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 758) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 759) case Opt_noextent_cache:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 760) clear_opt(sbi, EXTENT_CACHE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 761) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 762) case Opt_noinline_data:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 763) clear_opt(sbi, INLINE_DATA);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 764) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 765) case Opt_data_flush:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 766) set_opt(sbi, DATA_FLUSH);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 767) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 768) case Opt_reserve_root:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 769) if (args->from && match_int(args, &arg))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 770) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 771) if (test_opt(sbi, RESERVE_ROOT)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 772) f2fs_info(sbi, "Preserve previous reserve_root=%u",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 773) F2FS_OPTION(sbi).root_reserved_blocks);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 774) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 775) F2FS_OPTION(sbi).root_reserved_blocks = arg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 776) set_opt(sbi, RESERVE_ROOT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 777) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 778) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 779) case Opt_resuid:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 780) if (args->from && match_int(args, &arg))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 781) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 782) uid = make_kuid(current_user_ns(), arg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 783) if (!uid_valid(uid)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 784) f2fs_err(sbi, "Invalid uid value %d", arg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 785) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 786) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 787) F2FS_OPTION(sbi).s_resuid = uid;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 788) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 789) case Opt_resgid:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 790) if (args->from && match_int(args, &arg))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 791) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 792) gid = make_kgid(current_user_ns(), arg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 793) if (!gid_valid(gid)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 794) f2fs_err(sbi, "Invalid gid value %d", arg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 795) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 796) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 797) F2FS_OPTION(sbi).s_resgid = gid;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 798) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 799) case Opt_mode:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 800) name = match_strdup(&args[0]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 801)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 802) if (!name)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 803) return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 804) if (!strcmp(name, "adaptive")) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 805) if (f2fs_sb_has_blkzoned(sbi)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 806) f2fs_warn(sbi, "adaptive mode is not allowed with zoned block device feature");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 807) kfree(name);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 808) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 809) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 810) F2FS_OPTION(sbi).fs_mode = FS_MODE_ADAPTIVE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 811) } else if (!strcmp(name, "lfs")) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 812) F2FS_OPTION(sbi).fs_mode = FS_MODE_LFS;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 813) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 814) kfree(name);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 815) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 816) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 817) kfree(name);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 818) break;
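/*
 * io_bits=%u makes write IOs be issued in units of 2^arg pages, bounded
 * by BIO_MAX_PAGES; default_check below additionally requires mode=lfs
 * when this is set.
 */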
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 819) case Opt_io_size_bits:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 820) if (args->from && match_int(args, &arg))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 821) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 822) if (arg <= 0 || arg > __ilog2_u32(BIO_MAX_PAGES)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 823) f2fs_warn(sbi, "Not support %d, larger than %d",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 824) 1 << arg, BIO_MAX_PAGES);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 825) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 826) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 827) F2FS_OPTION(sbi).write_io_size_bits = arg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 828) break;
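/*
 * fault_injection=%d sets the injection rate for all fault types, while
 * fault_type=%d selects which fault types are injected.
 */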
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 829) #ifdef CONFIG_F2FS_FAULT_INJECTION
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 830) case Opt_fault_injection:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 831) if (args->from && match_int(args, &arg))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 832) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 833) f2fs_build_fault_attr(sbi, arg, F2FS_ALL_FAULT_TYPE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 834) set_opt(sbi, FAULT_INJECTION);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 835) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 836)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 837) case Opt_fault_type:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 838) if (args->from && match_int(args, &arg))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 839) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 840) f2fs_build_fault_attr(sbi, 0, arg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 841) set_opt(sbi, FAULT_INJECTION);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 842) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 843) #else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 844) case Opt_fault_injection:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 845) f2fs_info(sbi, "fault_injection options not supported");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 846) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 847)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 848) case Opt_fault_type:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 849) f2fs_info(sbi, "fault_type options not supported");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 850) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 851) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 852) case Opt_lazytime:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 853) sb->s_flags |= SB_LAZYTIME;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 854) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 855) case Opt_nolazytime:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 856) sb->s_flags &= ~SB_LAZYTIME;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 857) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 858) #ifdef CONFIG_QUOTA
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 859) case Opt_quota:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 860) case Opt_usrquota:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 861) set_opt(sbi, USRQUOTA);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 862) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 863) case Opt_grpquota:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 864) set_opt(sbi, GRPQUOTA);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 865) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 866) case Opt_prjquota:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 867) set_opt(sbi, PRJQUOTA);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 868) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 869) case Opt_usrjquota:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 870) ret = f2fs_set_qf_name(sb, USRQUOTA, &args[0]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 871) if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 872) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 873) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 874) case Opt_grpjquota:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 875) ret = f2fs_set_qf_name(sb, GRPQUOTA, &args[0]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 876) if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 877) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 878) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 879) case Opt_prjjquota:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 880) ret = f2fs_set_qf_name(sb, PRJQUOTA, &args[0]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 881) if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 882) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 883) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 884) case Opt_offusrjquota:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 885) ret = f2fs_clear_qf_name(sb, USRQUOTA);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 886) if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 887) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 888) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 889) case Opt_offgrpjquota:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 890) ret = f2fs_clear_qf_name(sb, GRPQUOTA);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 891) if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 892) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 893) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 894) case Opt_offprjjquota:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 895) ret = f2fs_clear_qf_name(sb, PRJQUOTA);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 896) if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 897) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 898) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 899) case Opt_jqfmt_vfsold:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 900) F2FS_OPTION(sbi).s_jquota_fmt = QFMT_VFS_OLD;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 901) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 902) case Opt_jqfmt_vfsv0:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 903) F2FS_OPTION(sbi).s_jquota_fmt = QFMT_VFS_V0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 904) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 905) case Opt_jqfmt_vfsv1:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 906) F2FS_OPTION(sbi).s_jquota_fmt = QFMT_VFS_V1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 907) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 908) case Opt_noquota:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 909) clear_opt(sbi, QUOTA);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 910) clear_opt(sbi, USRQUOTA);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 911) clear_opt(sbi, GRPQUOTA);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 912) clear_opt(sbi, PRJQUOTA);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 913) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 914) #else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 915) case Opt_quota:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 916) case Opt_usrquota:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 917) case Opt_grpquota:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 918) case Opt_prjquota:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 919) case Opt_usrjquota:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 920) case Opt_grpjquota:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 921) case Opt_prjjquota:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 922) case Opt_offusrjquota:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 923) case Opt_offgrpjquota:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 924) case Opt_offprjjquota:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 925) case Opt_jqfmt_vfsold:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 926) case Opt_jqfmt_vfsv0:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 927) case Opt_jqfmt_vfsv1:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 928) case Opt_noquota:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 929) f2fs_info(sbi, "quota operations not supported");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 930) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 931) #endif
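/*
 * whint_mode controls write hints passed down to the block layer:
 * "off" (default) passes none, "user-based" forwards user-provided
 * hints, and "fs-based" lets f2fs assign hints per log type.  See
 * Documentation/filesystems/f2fs.rst.
 */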
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 932) case Opt_whint:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 933) name = match_strdup(&args[0]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 934) if (!name)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 935) return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 936) if (!strcmp(name, "user-based")) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 937) F2FS_OPTION(sbi).whint_mode = WHINT_MODE_USER;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 938) } else if (!strcmp(name, "off")) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 939) F2FS_OPTION(sbi).whint_mode = WHINT_MODE_OFF;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 940) } else if (!strcmp(name, "fs-based")) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 941) F2FS_OPTION(sbi).whint_mode = WHINT_MODE_FS;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 942) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 943) kfree(name);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 944) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 945) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 946) kfree(name);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 947) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 948) case Opt_alloc:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 949) name = match_strdup(&args[0]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 950) if (!name)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 951) return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 952)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 953) if (!strcmp(name, "default")) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 954) F2FS_OPTION(sbi).alloc_mode = ALLOC_MODE_DEFAULT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 955) } else if (!strcmp(name, "reuse")) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 956) F2FS_OPTION(sbi).alloc_mode = ALLOC_MODE_REUSE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 957) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 958) kfree(name);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 959) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 960) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 961) kfree(name);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 962) break;
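/*
 * fsync_mode selects the fsync() policy: "posix" (default), "strict"
 * (stronger ordering guarantees), or "nobarrier" (like posix but
 * without issuing a cache flush).
 */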
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 963) case Opt_fsync:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 964) name = match_strdup(&args[0]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 965) if (!name)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 966) return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 967) if (!strcmp(name, "posix")) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 968) F2FS_OPTION(sbi).fsync_mode = FSYNC_MODE_POSIX;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 969) } else if (!strcmp(name, "strict")) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 970) F2FS_OPTION(sbi).fsync_mode = FSYNC_MODE_STRICT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 971) } else if (!strcmp(name, "nobarrier")) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 972) F2FS_OPTION(sbi).fsync_mode =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 973) FSYNC_MODE_NOBARRIER;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 974) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 975) kfree(name);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 976) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 977) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 978) kfree(name);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 979) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 980) case Opt_test_dummy_encryption:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 981) ret = f2fs_set_test_dummy_encryption(sb, p, &args[0],
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 982) is_remount);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 983) if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 984) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 985) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 986) case Opt_inlinecrypt:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 987) #ifdef CONFIG_FS_ENCRYPTION_INLINE_CRYPT
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 988) sb->s_flags |= SB_INLINECRYPT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 989) #else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 990) f2fs_info(sbi, "inline encryption not supported");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 991) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 992) break;
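/*
 * checkpoint=disable stops issuing checkpoints; the optional cap, given
 * as an absolute block count or a percentage of user blocks, bounds how
 * much space may become unusable while checkpointing is disabled.
 */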
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 993) case Opt_checkpoint_disable_cap_perc:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 994) if (args->from && match_int(args, &arg))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 995) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 996) if (arg < 0 || arg > 100)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 997) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 998) F2FS_OPTION(sbi).unusable_cap_perc = arg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 999) set_opt(sbi, DISABLE_CHECKPOINT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1000) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1001) case Opt_checkpoint_disable_cap:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1002) if (args->from && match_int(args, &arg))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1003) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1004) F2FS_OPTION(sbi).unusable_cap = arg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1005) set_opt(sbi, DISABLE_CHECKPOINT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1006) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1007) case Opt_checkpoint_disable:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1008) set_opt(sbi, DISABLE_CHECKPOINT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1009) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1010) case Opt_checkpoint_enable:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1011) clear_opt(sbi, DISABLE_CHECKPOINT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1012) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1013) case Opt_checkpoint_merge:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1014) set_opt(sbi, MERGE_CHECKPOINT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1015) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1016) case Opt_nocheckpoint_merge:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1017) clear_opt(sbi, MERGE_CHECKPOINT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1018) break;
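/*
 * compress_algorithm, compress_log_size and compress_extension only take
 * effect when the on-disk compression feature is enabled; otherwise they
 * are skipped with an info message.
 */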
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1019) #ifdef CONFIG_F2FS_FS_COMPRESSION
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1020) case Opt_compress_algorithm:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1021) if (!f2fs_sb_has_compression(sbi)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1022) f2fs_info(sbi, "Image doesn't support compression");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1023) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1024) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1025) name = match_strdup(&args[0]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1026) if (!name)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1027) return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1028) if (!strcmp(name, "lzo")) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1029) #ifdef CONFIG_F2FS_FS_LZO
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1030) F2FS_OPTION(sbi).compress_level = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1031) F2FS_OPTION(sbi).compress_algorithm =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1032) COMPRESS_LZO;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1033) #else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1034) f2fs_info(sbi, "kernel doesn't support lzo compression");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1035) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1036) } else if (!strncmp(name, "lz4", 3)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1037) #ifdef CONFIG_F2FS_FS_LZ4
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1038) ret = f2fs_set_lz4hc_level(sbi, name);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1039) if (ret) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1040) kfree(name);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1041) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1042) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1043) F2FS_OPTION(sbi).compress_algorithm =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1044) COMPRESS_LZ4;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1045) #else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1046) f2fs_info(sbi, "kernel doesn't support lz4 compression");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1047) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1048) } else if (!strncmp(name, "zstd", 4)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1049) #ifdef CONFIG_F2FS_FS_ZSTD
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1050) ret = f2fs_set_zstd_level(sbi, name);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1051) if (ret) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1052) kfree(name);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1053) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1054) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1055) F2FS_OPTION(sbi).compress_algorithm =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1056) COMPRESS_ZSTD;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1057) #else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1058) f2fs_info(sbi, "kernel doesn't support zstd compression");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1059) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1060) } else if (!strcmp(name, "lzo-rle")) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1061) #ifdef CONFIG_F2FS_FS_LZORLE
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1062) F2FS_OPTION(sbi).compress_level = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1063) F2FS_OPTION(sbi).compress_algorithm =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1064) COMPRESS_LZORLE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1065) #else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1066) f2fs_info(sbi, "kernel doesn't support lzorle compression");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1067) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1068) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1069) kfree(name);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1070) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1071) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1072) kfree(name);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1073) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1074) case Opt_compress_log_size:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1075) if (!f2fs_sb_has_compression(sbi)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1076) f2fs_info(sbi, "Image doesn't support compression");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1077) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1078) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1079) if (args->from && match_int(args, &arg))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1080) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1081) if (arg < MIN_COMPRESS_LOG_SIZE ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1082) arg > MAX_COMPRESS_LOG_SIZE) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1083) f2fs_err(sbi,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1084) "Compress cluster log size is out of range");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1085) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1086) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1087) F2FS_OPTION(sbi).compress_log_size = arg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1088) break;
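/*
 * compress_extension=%s registers a filename extension whose files get
 * compression enabled by default; at most COMPRESS_EXT_NUM extensions of
 * up to F2FS_EXTENSION_LEN - 1 characters are accepted.
 */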
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1089) case Opt_compress_extension:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1090) if (!f2fs_sb_has_compression(sbi)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1091) f2fs_info(sbi, "Image doesn't support compression");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1092) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1093) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1094) name = match_strdup(&args[0]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1095) if (!name)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1096) return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1097)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1098) ext = F2FS_OPTION(sbi).extensions;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1099) ext_cnt = F2FS_OPTION(sbi).compress_ext_cnt;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1100)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1101) if (strlen(name) >= F2FS_EXTENSION_LEN ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1102) ext_cnt >= COMPRESS_EXT_NUM) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1103) f2fs_err(sbi,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1104) "invalid extension length/number");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1105) kfree(name);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1106) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1107) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1108)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1109) strcpy(ext[ext_cnt], name);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1110) F2FS_OPTION(sbi).compress_ext_cnt++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1111) kfree(name);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1112) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1113) case Opt_compress_chksum:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1114) F2FS_OPTION(sbi).compress_chksum = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1115) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1116) case Opt_compress_mode:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1117) name = match_strdup(&args[0]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1118) if (!name)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1119) return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1120) if (!strcmp(name, "fs")) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1121) F2FS_OPTION(sbi).compress_mode = COMPR_MODE_FS;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1122) } else if (!strcmp(name, "user")) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1123) F2FS_OPTION(sbi).compress_mode = COMPR_MODE_USER;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1124) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1125) kfree(name);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1126) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1127) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1128) kfree(name);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1129) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1130) case Opt_compress_cache:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1131) set_opt(sbi, COMPRESS_CACHE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1132) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1133) #else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1134) case Opt_compress_algorithm:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1135) case Opt_compress_log_size:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1136) case Opt_compress_extension:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1137) case Opt_compress_chksum:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1138) case Opt_compress_mode:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1139) case Opt_compress_cache:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1140) f2fs_info(sbi, "compression options not supported");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1141) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1142) #endif
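/*
 * atgc enables the age-threshold GC victim selection policy; gc_merge
 * lets the background GC thread also handle foreground GC requests.
 */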
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1143) case Opt_atgc:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1144) set_opt(sbi, ATGC);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1145) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1146) case Opt_gc_merge:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1147) set_opt(sbi, GC_MERGE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1148) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1149) case Opt_nogc_merge:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1150) clear_opt(sbi, GC_MERGE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1151) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1152) default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1153) f2fs_err(sbi, "Unrecognized mount option \"%s\" or missing value",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1154) p);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1155) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1156) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1157) }
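/*
 * default_check: after all options are parsed, validate cross-option and
 * feature-flag consistency.
 */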
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1158) default_check:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1159) #ifdef CONFIG_QUOTA
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1160) if (f2fs_check_quota_options(sbi))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1161) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1162) #else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1163) if (f2fs_sb_has_quota_ino(sbi) && !f2fs_readonly(sbi->sb)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1164) f2fs_info(sbi, "Filesystem with quota feature cannot be mounted RDWR without CONFIG_QUOTA");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1165) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1166) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1167) if (f2fs_sb_has_project_quota(sbi) && !f2fs_readonly(sbi->sb)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1168) f2fs_err(sbi, "Filesystem with project quota feature cannot be mounted RDWR without CONFIG_QUOTA");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1169) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1170) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1171) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1172) #ifndef CONFIG_UNICODE
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1173) if (f2fs_sb_has_casefold(sbi)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1174) f2fs_err(sbi,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1175) "Filesystem with casefold feature cannot be mounted without CONFIG_UNICODE");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1176) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1177) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1178) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1179) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1180) * The BLKZONED feature indicates that the drive was formatted with
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1181) * zone alignment optimization. This is optional for host-aware
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1182) * devices, but mandatory for host-managed zoned block devices.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1183) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1184) #ifndef CONFIG_BLK_DEV_ZONED
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1185) if (f2fs_sb_has_blkzoned(sbi)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1186) f2fs_err(sbi, "Zoned block device support is not enabled");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1187) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1188) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1189) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1190)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1191) if (F2FS_IO_SIZE_BITS(sbi) && !f2fs_lfs_mode(sbi)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1192) f2fs_err(sbi, "Should set mode=lfs with %uKB-sized IO",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1193) F2FS_IO_SIZE_KB(sbi));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1194) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1195) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1196)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1197) if (test_opt(sbi, INLINE_XATTR_SIZE)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1198) int min_size, max_size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1199)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1200) if (!f2fs_sb_has_extra_attr(sbi) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1201) !f2fs_sb_has_flexible_inline_xattr(sbi)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1202) f2fs_err(sbi, "extra_attr or flexible_inline_xattr feature is off");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1203) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1204) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1205) if (!test_opt(sbi, INLINE_XATTR)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1206) f2fs_err(sbi, "inline_xattr_size option should be set with inline_xattr option");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1207) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1208) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1209)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1210) min_size = sizeof(struct f2fs_xattr_header) / sizeof(__le32);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1211) max_size = MAX_INLINE_XATTR_SIZE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1212)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1213) if (F2FS_OPTION(sbi).inline_xattr_size < min_size ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1214) F2FS_OPTION(sbi).inline_xattr_size > max_size) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1215) f2fs_err(sbi, "inline xattr size is out of range: %d ~ %d",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1216) min_size, max_size);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1217) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1218) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1219) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1220)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1221) if (test_opt(sbi, DISABLE_CHECKPOINT) && f2fs_lfs_mode(sbi)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1222) f2fs_err(sbi, "LFS not compatible with checkpoint=disable");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1223) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1224) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1225)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1226) /* Do not pass down write hints if the number of active logs is less
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1227) * than NR_CURSEG_PERSIST_TYPE.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1228) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1229) if (F2FS_OPTION(sbi).active_logs != NR_CURSEG_PERSIST_TYPE)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1230) F2FS_OPTION(sbi).whint_mode = WHINT_MODE_OFF;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1231)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1232) if (f2fs_sb_has_readonly(sbi) && !f2fs_readonly(sbi->sb)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1233) f2fs_err(sbi, "Allow to mount readonly mode only");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1234) return -EROFS;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1235) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1236) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1237) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1238)
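/*
 * Allocate an f2fs inode from the dedicated slab cache and initialize the
 * f2fs-specific fields; the embedded VFS inode is returned to the VFS.
 */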
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1239) static struct inode *f2fs_alloc_inode(struct super_block *sb)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1240) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1241) struct f2fs_inode_info *fi;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1242)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1243) fi = kmem_cache_alloc(f2fs_inode_cachep, GFP_F2FS_ZERO);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1244) if (!fi)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1245) return NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1246)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1247) init_once((void *) fi);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1248)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1249) /* Initialize f2fs-specific inode info */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1250) atomic_set(&fi->dirty_pages, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1251) atomic_set(&fi->i_compr_blocks, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1252) init_f2fs_rwsem(&fi->i_sem);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1253) spin_lock_init(&fi->i_size_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1254) INIT_LIST_HEAD(&fi->dirty_list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1255) INIT_LIST_HEAD(&fi->gdirty_list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1256) INIT_LIST_HEAD(&fi->inmem_ilist);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1257) INIT_LIST_HEAD(&fi->inmem_pages);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1258) mutex_init(&fi->inmem_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1259) init_f2fs_rwsem(&fi->i_gc_rwsem[READ]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1260) init_f2fs_rwsem(&fi->i_gc_rwsem[WRITE]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1261) init_f2fs_rwsem(&fi->i_mmap_sem);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1262) init_f2fs_rwsem(&fi->i_xattr_sem);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1263)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1264) /* Will be used by directory only */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1265) fi->i_dir_level = F2FS_SB(sb)->dir_level;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1266)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1267) return &fi->vfs_inode;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1268) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1269)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1270) static int f2fs_drop_inode(struct inode *inode)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1271) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1272) struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1273) int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1274)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1275) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1276) * during filesystem shutdown, if checkpoint is disabled,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1277) * drop useless meta/node dirty pages.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1278) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1279) if (unlikely(is_sbi_flag_set(sbi, SBI_CP_DISABLED))) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1280) if (inode->i_ino == F2FS_NODE_INO(sbi) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1281) inode->i_ino == F2FS_META_INO(sbi)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1282) trace_f2fs_drop_inode(inode, 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1283) return 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1284) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1285) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1286)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1287) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1288) * This is to avoid a deadlock condition like below.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1289) * writeback_single_inode(inode)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1290) * - f2fs_write_data_page
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1291) * - f2fs_gc -> iput -> evict
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1292) * - inode_wait_for_writeback(inode)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1293) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1294) if (!inode_unhashed(inode) && (inode->i_state & I_SYNC)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1295) if (!inode->i_nlink && !is_bad_inode(inode)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1296) /* to avoid calling evict_inode simultaneously */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1297) atomic_inc(&inode->i_count);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1298) spin_unlock(&inode->i_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1299)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1300) /* any remaining atomic pages should be discarded */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1301) if (f2fs_is_atomic_file(inode))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1302) f2fs_drop_inmem_pages(inode);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1303)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1304) /* fi->extent_tree should be kept for writepage */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1305) f2fs_destroy_extent_node(inode);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1306)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1307) sb_start_intwrite(inode->i_sb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1308) f2fs_i_size_write(inode, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1309)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1310) f2fs_submit_merged_write_cond(F2FS_I_SB(inode),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1311) inode, NULL, 0, DATA);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1312) truncate_inode_pages_final(inode->i_mapping);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1313)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1314) if (F2FS_HAS_BLOCKS(inode))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1315) f2fs_truncate(inode);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1316)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1317) sb_end_intwrite(inode->i_sb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1318)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1319) spin_lock(&inode->i_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1320) atomic_dec(&inode->i_count);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1321) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1322) trace_f2fs_drop_inode(inode, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1323) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1324) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1325) ret = generic_drop_inode(inode);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1326) if (!ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1327) ret = fscrypt_drop_inode(inode);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1328) trace_f2fs_drop_inode(inode, ret);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1329) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1330) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1331)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1332) int f2fs_inode_dirtied(struct inode *inode, bool sync)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1333) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1334) struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1335) int ret = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1336)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1337) spin_lock(&sbi->inode_lock[DIRTY_META]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1338) if (is_inode_flag_set(inode, FI_DIRTY_INODE)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1339) ret = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1340) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1341) set_inode_flag(inode, FI_DIRTY_INODE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1342) stat_inc_dirty_inode(sbi, DIRTY_META);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1343) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1344) if (sync && list_empty(&F2FS_I(inode)->gdirty_list)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1345) list_add_tail(&F2FS_I(inode)->gdirty_list,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1346) &sbi->inode_list[DIRTY_META]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1347) inc_page_count(sbi, F2FS_DIRTY_IMETA);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1348) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1349) spin_unlock(&sbi->inode_lock[DIRTY_META]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1350) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1351) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1352)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1353) void f2fs_inode_synced(struct inode *inode)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1354) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1355) struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1356)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1357) spin_lock(&sbi->inode_lock[DIRTY_META]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1358) if (!is_inode_flag_set(inode, FI_DIRTY_INODE)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1359) spin_unlock(&sbi->inode_lock[DIRTY_META]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1360) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1361) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1362) if (!list_empty(&F2FS_I(inode)->gdirty_list)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1363) list_del_init(&F2FS_I(inode)->gdirty_list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1364) dec_page_count(sbi, F2FS_DIRTY_IMETA);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1365) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1366) clear_inode_flag(inode, FI_DIRTY_INODE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1367) clear_inode_flag(inode, FI_AUTO_RECOVER);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1368) stat_dec_dirty_inode(F2FS_I_SB(inode), DIRTY_META);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1369) spin_unlock(&sbi->inode_lock[DIRTY_META]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1370) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1371)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1372) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1373) * f2fs_dirty_inode() is called from __mark_inode_dirty()
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1374) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1375) * We should mark the inode dirty so that it is written back through write_inode.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1376) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1377) static void f2fs_dirty_inode(struct inode *inode, int flags)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1378) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1379) struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1380)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1381) if (inode->i_ino == F2FS_NODE_INO(sbi) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1382) inode->i_ino == F2FS_META_INO(sbi))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1383) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1384)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1385) if (flags == I_DIRTY_TIME)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1386) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1387)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1388) if (is_inode_flag_set(inode, FI_AUTO_RECOVER))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1389) clear_inode_flag(inode, FI_AUTO_RECOVER);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1390)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1391) f2fs_inode_dirtied(inode, false);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1392) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1393)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1394) static void f2fs_free_inode(struct inode *inode)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1395) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1396) fscrypt_free_inode(inode);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1397) kmem_cache_free(f2fs_inode_cachep, F2FS_I(inode));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1398) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1399)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1400) static void destroy_percpu_info(struct f2fs_sb_info *sbi)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1401) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1402) percpu_counter_destroy(&sbi->alloc_valid_block_count);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1403) percpu_counter_destroy(&sbi->total_valid_inode_count);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1404) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1405)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1406) static void destroy_device_list(struct f2fs_sb_info *sbi)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1407) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1408) int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1409)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1410) for (i = 0; i < sbi->s_ndevs; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1411) blkdev_put(FDEV(i).bdev, FMODE_EXCL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1412) #ifdef CONFIG_BLK_DEV_ZONED
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1413) kvfree(FDEV(i).blkz_seq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1414) kfree(FDEV(i).zone_capacity_blocks);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1415) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1416) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1417) kvfree(sbi->devs);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1418) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1419)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1420) static void f2fs_put_super(struct super_block *sb)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1421) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1422) struct f2fs_sb_info *sbi = F2FS_SB(sb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1423) int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1424) bool dropped;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1425)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1426) /* unregister procfs/sysfs entries in advance to avoid a race */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1427) f2fs_unregister_sysfs(sbi);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1428)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1429) f2fs_quota_off_umount(sb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1430)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1431) /* prevent remaining shrinker jobs */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1432) mutex_lock(&sbi->umount_mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1433)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1434) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1435) * Flush all issued checkpoints and stop the checkpoint issue thread.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1436) * After that, all checkpoints should be done by each process context.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1437) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1438) f2fs_stop_ckpt_thread(sbi);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1439)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1440) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1441) * We don't need to do checkpoint when superblock is clean.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1442) * But if the previous checkpoint was not done by umount, we need to do
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1443) * a clean checkpoint again.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1444) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1445) if ((is_sbi_flag_set(sbi, SBI_IS_DIRTY) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1446) !is_set_ckpt_flags(sbi, CP_UMOUNT_FLAG))) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1447) struct cp_control cpc = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1448) .reason = CP_UMOUNT,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1449) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1450) f2fs_write_checkpoint(sbi, &cpc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1451) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1452)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1453) /* be sure to wait for any on-going discard commands */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1454) dropped = f2fs_issue_discard_timeout(sbi);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1455)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1456) if ((f2fs_hw_support_discard(sbi) || f2fs_hw_should_discard(sbi)) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1457) !sbi->discard_blks && !dropped) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1458) struct cp_control cpc = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1459) .reason = CP_UMOUNT | CP_TRIMMED,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1460) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1461) f2fs_write_checkpoint(sbi, &cpc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1462) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1463)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1464) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1465) * Normally the superblock is clean, so we need to release this.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1466) * In addition, EIO will skip the checkpoint, so we need this as well.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1467) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1468) f2fs_release_ino_entry(sbi, true);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1469)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1470) f2fs_leave_shrinker(sbi);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1471) mutex_unlock(&sbi->umount_mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1472)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1473) /* in our cp_error case, we can wait for any writeback page */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1474) f2fs_flush_merged_writes(sbi);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1475)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1476) f2fs_wait_on_all_pages(sbi, F2FS_WB_CP_DATA);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1477)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1478) f2fs_bug_on(sbi, sbi->fsync_node_num);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1479)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1480) f2fs_destroy_compress_inode(sbi);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1481)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1482) iput(sbi->node_inode);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1483) sbi->node_inode = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1484)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1485) iput(sbi->meta_inode);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1486) sbi->meta_inode = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1487)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1488) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1489) * iput() can update stat information if f2fs_write_checkpoint()
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1490) * above failed with an error.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1491) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1492) f2fs_destroy_stats(sbi);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1493)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1494) /* destroy f2fs internal modules */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1495) f2fs_destroy_node_manager(sbi);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1496) f2fs_destroy_segment_manager(sbi);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1497)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1498) f2fs_destroy_post_read_wq(sbi);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1499)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1500) kvfree(sbi->ckpt);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1501)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1502) sb->s_fs_info = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1503) if (sbi->s_chksum_driver)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1504) crypto_free_shash(sbi->s_chksum_driver);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1505) kfree(sbi->raw_super);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1506)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1507) destroy_device_list(sbi);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1508) f2fs_destroy_page_array_cache(sbi);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1509) f2fs_destroy_xattr_caches(sbi);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1510) mempool_destroy(sbi->write_io_dummy);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1511) #ifdef CONFIG_QUOTA
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1512) for (i = 0; i < MAXQUOTAS; i++)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1513) kfree(F2FS_OPTION(sbi).s_qf_names[i]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1514) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1515) fscrypt_free_dummy_policy(&F2FS_OPTION(sbi).dummy_enc_policy);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1516) destroy_percpu_info(sbi);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1517) for (i = 0; i < NR_PAGE_TYPE; i++)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1518) kvfree(sbi->write_io[i]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1519) #ifdef CONFIG_UNICODE
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1520) utf8_unload(sb->s_encoding);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1521) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1522) kfree(sbi);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1523) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1524)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1525) int f2fs_sync_fs(struct super_block *sb, int sync)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1526) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1527) struct f2fs_sb_info *sbi = F2FS_SB(sb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1528) int err = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1529)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1530) if (unlikely(f2fs_cp_error(sbi)))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1531) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1532) if (unlikely(is_sbi_flag_set(sbi, SBI_CP_DISABLED)))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1533) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1534)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1535) trace_f2fs_sync_fs(sb, sync);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1536)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1537) if (unlikely(is_sbi_flag_set(sbi, SBI_POR_DOING)))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1538) return -EAGAIN;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1539)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1540) if (sync) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1541) err = f2fs_issue_checkpoint(sbi);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1542) atomic_set(&sbi->no_cp_fsync_pages, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1543) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1544)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1545) return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1546) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1547)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1548) static int f2fs_freeze(struct super_block *sb)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1549) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1550) if (f2fs_readonly(sb))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1551) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1552)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1553) /* IO error happened before */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1554) if (unlikely(f2fs_cp_error(F2FS_SB(sb))))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1555) return -EIO;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1556)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1557) /* must be clean, since sync_filesystem() was already called */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1558) if (is_sbi_flag_set(F2FS_SB(sb), SBI_IS_DIRTY))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1559) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1560)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1561) /* ensure no checkpoint required */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1562) if (!llist_empty(&F2FS_SB(sb)->cprc_info.issue_list))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1563) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1564) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1565) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1566)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1567) static int f2fs_unfreeze(struct super_block *sb)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1568) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1569) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1570) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1571)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1572) #ifdef CONFIG_QUOTA
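/*
 * Clamp the statfs block and inode counts to the project quota limits of
 * the given project ID, when such limits are configured.
 */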
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1573) static int f2fs_statfs_project(struct super_block *sb,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1574) kprojid_t projid, struct kstatfs *buf)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1575) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1576) struct kqid qid;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1577) struct dquot *dquot;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1578) u64 limit;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1579) u64 curblock;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1580)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1581) qid = make_kqid_projid(projid);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1582) dquot = dqget(sb, qid);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1583) if (IS_ERR(dquot))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1584) return PTR_ERR(dquot);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1585) spin_lock(&dquot->dq_dqb_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1586)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1587) limit = min_not_zero(dquot->dq_dqb.dqb_bsoftlimit,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1588) dquot->dq_dqb.dqb_bhardlimit);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1589) if (limit)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1590) limit >>= sb->s_blocksize_bits;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1591)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1592) if (limit && buf->f_blocks > limit) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1593) curblock = (dquot->dq_dqb.dqb_curspace +
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1594) dquot->dq_dqb.dqb_rsvspace) >> sb->s_blocksize_bits;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1595) buf->f_blocks = limit;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1596) buf->f_bfree = buf->f_bavail =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1597) (buf->f_blocks > curblock) ?
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1598) (buf->f_blocks - curblock) : 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1599) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1600)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1601) limit = min_not_zero(dquot->dq_dqb.dqb_isoftlimit,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1602) dquot->dq_dqb.dqb_ihardlimit);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1603)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1604) if (limit && buf->f_files > limit) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1605) buf->f_files = limit;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1606) buf->f_ffree =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1607) (buf->f_files > dquot->dq_dqb.dqb_curinodes) ?
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1608) (buf->f_files - dquot->dq_dqb.dqb_curinodes) : 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1609) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1610)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1611) spin_unlock(&dquot->dq_dqb_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1612) dqput(dquot);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1613) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1614) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1615) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1616)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1617) static int f2fs_statfs(struct dentry *dentry, struct kstatfs *buf)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1618) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1619) struct super_block *sb = dentry->d_sb;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1620) struct f2fs_sb_info *sbi = F2FS_SB(sb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1621) u64 id = huge_encode_dev(sb->s_bdev->bd_dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1622) block_t total_count, user_block_count, start_count;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1623) u64 avail_node_count;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1624)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1625) total_count = le64_to_cpu(sbi->raw_super->block_count);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1626) user_block_count = sbi->user_block_count;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1627) start_count = le32_to_cpu(sbi->raw_super->segment0_blkaddr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1628) buf->f_type = F2FS_SUPER_MAGIC;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1629) buf->f_bsize = sbi->blocksize;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1630)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1631) /* f_blocks should not include the overhead of the filesystem */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1632) buf->f_blocks = user_block_count;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1633) buf->f_bfree = user_block_count - valid_user_blocks(sbi) -
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1634) sbi->current_reserved_blocks;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1635)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1636) spin_lock(&sbi->stat_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1637) if (unlikely(buf->f_bfree <= sbi->unusable_block_count))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1638) buf->f_bfree = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1639) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1640) buf->f_bfree -= sbi->unusable_block_count;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1641) spin_unlock(&sbi->stat_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1642)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1643) if (buf->f_bfree > F2FS_OPTION(sbi).root_reserved_blocks)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1644) buf->f_bavail = buf->f_bfree -
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1645) F2FS_OPTION(sbi).root_reserved_blocks;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1646) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1647) buf->f_bavail = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1648)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1649) avail_node_count = sbi->total_node_count - F2FS_RESERVED_NODE_NUM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1650)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1651) if (avail_node_count > user_block_count) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1652) buf->f_files = user_block_count;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1653) buf->f_ffree = buf->f_bavail;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1654) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1655) buf->f_files = avail_node_count;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1656) buf->f_ffree = min(avail_node_count - valid_node_count(sbi),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1657) buf->f_bavail);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1658) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1659)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1660) buf->f_namelen = F2FS_NAME_LEN;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1661) buf->f_fsid = u64_to_fsid(id);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1662)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1663) #ifdef CONFIG_QUOTA
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1664) if (is_inode_flag_set(dentry->d_inode, FI_PROJ_INHERIT) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1665) sb_has_quota_limits_enabled(sb, PRJQUOTA)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1666) f2fs_statfs_project(sb, F2FS_I(dentry->d_inode)->i_projid, buf);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1667) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1668) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1669) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1670) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1671)
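/*
 * Emit the journalled quota mount options (jqfmt, usrjquota, grpjquota,
 * prjjquota) for /proc/mounts.
 */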
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1672) static inline void f2fs_show_quota_options(struct seq_file *seq,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1673) struct super_block *sb)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1674) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1675) #ifdef CONFIG_QUOTA
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1676) struct f2fs_sb_info *sbi = F2FS_SB(sb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1677)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1678) if (F2FS_OPTION(sbi).s_jquota_fmt) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1679) char *fmtname = "";
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1680)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1681) switch (F2FS_OPTION(sbi).s_jquota_fmt) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1682) case QFMT_VFS_OLD:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1683) fmtname = "vfsold";
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1684) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1685) case QFMT_VFS_V0:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1686) fmtname = "vfsv0";
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1687) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1688) case QFMT_VFS_V1:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1689) fmtname = "vfsv1";
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1690) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1691) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1692) seq_printf(seq, ",jqfmt=%s", fmtname);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1693) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1694)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1695) if (F2FS_OPTION(sbi).s_qf_names[USRQUOTA])
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1696) seq_show_option(seq, "usrjquota",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1697) F2FS_OPTION(sbi).s_qf_names[USRQUOTA]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1698)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1699) if (F2FS_OPTION(sbi).s_qf_names[GRPQUOTA])
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1700) seq_show_option(seq, "grpjquota",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1701) F2FS_OPTION(sbi).s_qf_names[GRPQUOTA]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1702)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1703) if (F2FS_OPTION(sbi).s_qf_names[PRJQUOTA])
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1704) seq_show_option(seq, "prjjquota",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1705) F2FS_OPTION(sbi).s_qf_names[PRJQUOTA]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1706) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1707) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1708)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1709) #ifdef CONFIG_F2FS_FS_COMPRESSION
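/*
 * Emit the compression mount options (compress_algorithm[:level],
 * compress_log_size, compress_extension, compress_chksum, compress_mode
 * and compress_cache) for /proc/mounts.
 */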
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1710) static inline void f2fs_show_compress_options(struct seq_file *seq,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1711) struct super_block *sb)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1712) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1713) struct f2fs_sb_info *sbi = F2FS_SB(sb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1714) char *algtype = "";
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1715) int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1716)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1717) if (!f2fs_sb_has_compression(sbi))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1718) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1719)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1720) switch (F2FS_OPTION(sbi).compress_algorithm) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1721) case COMPRESS_LZO:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1722) algtype = "lzo";
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1723) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1724) case COMPRESS_LZ4:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1725) algtype = "lz4";
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1726) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1727) case COMPRESS_ZSTD:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1728) algtype = "zstd";
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1729) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1730) case COMPRESS_LZORLE:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1731) algtype = "lzo-rle";
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1732) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1733) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1734) seq_printf(seq, ",compress_algorithm=%s", algtype);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1735)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1736) if (F2FS_OPTION(sbi).compress_level)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1737) seq_printf(seq, ":%d", F2FS_OPTION(sbi).compress_level);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1738)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1739) seq_printf(seq, ",compress_log_size=%u",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1740) F2FS_OPTION(sbi).compress_log_size);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1741)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1742) for (i = 0; i < F2FS_OPTION(sbi).compress_ext_cnt; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1743) seq_printf(seq, ",compress_extension=%s",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1744) F2FS_OPTION(sbi).extensions[i]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1745) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1746)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1747) if (F2FS_OPTION(sbi).compress_chksum)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1748) seq_puts(seq, ",compress_chksum");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1749)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1750) if (F2FS_OPTION(sbi).compress_mode == COMPR_MODE_FS)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1751) seq_printf(seq, ",compress_mode=%s", "fs");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1752) else if (F2FS_OPTION(sbi).compress_mode == COMPR_MODE_USER)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1753) seq_printf(seq, ",compress_mode=%s", "user");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1754)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1755) if (test_opt(sbi, COMPRESS_CACHE))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1756) seq_puts(seq, ",compress_cache");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1757) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1758) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1759)
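/*
 * Show the active mount options in /proc/mounts (->show_options).
 * Illustrative fragment of the output for a typical default mount (the
 * exact contents depend on the Kconfig and the options used):
 *   ...,background_gc=on,discard,no_heap,user_xattr,inline_xattr,acl,
 *   inline_data,inline_dentry,flush_merge,extent_cache,mode=adaptive,...
 */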
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1760) static int f2fs_show_options(struct seq_file *seq, struct dentry *root)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1761) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1762) struct f2fs_sb_info *sbi = F2FS_SB(root->d_sb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1763)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1764) if (F2FS_OPTION(sbi).bggc_mode == BGGC_MODE_SYNC)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1765) seq_printf(seq, ",background_gc=%s", "sync");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1766) else if (F2FS_OPTION(sbi).bggc_mode == BGGC_MODE_ON)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1767) seq_printf(seq, ",background_gc=%s", "on");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1768) else if (F2FS_OPTION(sbi).bggc_mode == BGGC_MODE_OFF)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1769) seq_printf(seq, ",background_gc=%s", "off");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1770)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1771) if (test_opt(sbi, GC_MERGE))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1772) seq_puts(seq, ",gc_merge");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1773)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1774) if (test_opt(sbi, DISABLE_ROLL_FORWARD))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1775) seq_puts(seq, ",disable_roll_forward");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1776) if (test_opt(sbi, NORECOVERY))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1777) seq_puts(seq, ",norecovery");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1778) if (test_opt(sbi, DISCARD))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1779) seq_puts(seq, ",discard");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1780) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1781) seq_puts(seq, ",nodiscard");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1782) if (test_opt(sbi, NOHEAP))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1783) seq_puts(seq, ",no_heap");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1784) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1785) seq_puts(seq, ",heap");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1786) #ifdef CONFIG_F2FS_FS_XATTR
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1787) if (test_opt(sbi, XATTR_USER))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1788) seq_puts(seq, ",user_xattr");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1789) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1790) seq_puts(seq, ",nouser_xattr");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1791) if (test_opt(sbi, INLINE_XATTR))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1792) seq_puts(seq, ",inline_xattr");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1793) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1794) seq_puts(seq, ",noinline_xattr");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1795) if (test_opt(sbi, INLINE_XATTR_SIZE))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1796) seq_printf(seq, ",inline_xattr_size=%u",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1797) F2FS_OPTION(sbi).inline_xattr_size);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1798) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1799) #ifdef CONFIG_F2FS_FS_POSIX_ACL
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1800) if (test_opt(sbi, POSIX_ACL))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1801) seq_puts(seq, ",acl");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1802) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1803) seq_puts(seq, ",noacl");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1804) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1805) if (test_opt(sbi, DISABLE_EXT_IDENTIFY))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1806) seq_puts(seq, ",disable_ext_identify");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1807) if (test_opt(sbi, INLINE_DATA))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1808) seq_puts(seq, ",inline_data");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1809) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1810) seq_puts(seq, ",noinline_data");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1811) if (test_opt(sbi, INLINE_DENTRY))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1812) seq_puts(seq, ",inline_dentry");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1813) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1814) seq_puts(seq, ",noinline_dentry");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1815) if (!f2fs_readonly(sbi->sb) && test_opt(sbi, FLUSH_MERGE))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1816) seq_puts(seq, ",flush_merge");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1817) if (test_opt(sbi, NOBARRIER))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1818) seq_puts(seq, ",nobarrier");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1819) if (test_opt(sbi, FASTBOOT))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1820) seq_puts(seq, ",fastboot");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1821) if (test_opt(sbi, EXTENT_CACHE))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1822) seq_puts(seq, ",extent_cache");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1823) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1824) seq_puts(seq, ",noextent_cache");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1825) if (test_opt(sbi, DATA_FLUSH))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1826) seq_puts(seq, ",data_flush");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1827)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1828) seq_puts(seq, ",mode=");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1829) if (F2FS_OPTION(sbi).fs_mode == FS_MODE_ADAPTIVE)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1830) seq_puts(seq, "adaptive");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1831) else if (F2FS_OPTION(sbi).fs_mode == FS_MODE_LFS)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1832) seq_puts(seq, "lfs");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1833) seq_printf(seq, ",active_logs=%u", F2FS_OPTION(sbi).active_logs);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1834) if (test_opt(sbi, RESERVE_ROOT))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1835) seq_printf(seq, ",reserve_root=%u,resuid=%u,resgid=%u",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1836) F2FS_OPTION(sbi).root_reserved_blocks,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1837) from_kuid_munged(&init_user_ns,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1838) F2FS_OPTION(sbi).s_resuid),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1839) from_kgid_munged(&init_user_ns,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1840) F2FS_OPTION(sbi).s_resgid));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1841) if (F2FS_IO_SIZE_BITS(sbi))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1842) seq_printf(seq, ",io_bits=%u",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1843) F2FS_OPTION(sbi).write_io_size_bits);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1844) #ifdef CONFIG_F2FS_FAULT_INJECTION
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1845) if (test_opt(sbi, FAULT_INJECTION)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1846) seq_printf(seq, ",fault_injection=%u",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1847) F2FS_OPTION(sbi).fault_info.inject_rate);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1848) seq_printf(seq, ",fault_type=%u",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1849) F2FS_OPTION(sbi).fault_info.inject_type);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1850) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1851) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1852) #ifdef CONFIG_QUOTA
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1853) if (test_opt(sbi, QUOTA))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1854) seq_puts(seq, ",quota");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1855) if (test_opt(sbi, USRQUOTA))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1856) seq_puts(seq, ",usrquota");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1857) if (test_opt(sbi, GRPQUOTA))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1858) seq_puts(seq, ",grpquota");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1859) if (test_opt(sbi, PRJQUOTA))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1860) seq_puts(seq, ",prjquota");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1861) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1862) f2fs_show_quota_options(seq, sbi->sb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1863) if (F2FS_OPTION(sbi).whint_mode == WHINT_MODE_USER)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1864) seq_printf(seq, ",whint_mode=%s", "user-based");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1865) else if (F2FS_OPTION(sbi).whint_mode == WHINT_MODE_FS)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1866) seq_printf(seq, ",whint_mode=%s", "fs-based");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1867)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1868) fscrypt_show_test_dummy_encryption(seq, ',', sbi->sb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1869)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1870) if (sbi->sb->s_flags & SB_INLINECRYPT)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1871) seq_puts(seq, ",inlinecrypt");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1872)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1873) if (F2FS_OPTION(sbi).alloc_mode == ALLOC_MODE_DEFAULT)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1874) seq_printf(seq, ",alloc_mode=%s", "default");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1875) else if (F2FS_OPTION(sbi).alloc_mode == ALLOC_MODE_REUSE)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1876) seq_printf(seq, ",alloc_mode=%s", "reuse");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1877)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1878) if (test_opt(sbi, DISABLE_CHECKPOINT))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1879) seq_printf(seq, ",checkpoint=disable:%u",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1880) F2FS_OPTION(sbi).unusable_cap);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1881) if (test_opt(sbi, MERGE_CHECKPOINT))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1882) seq_puts(seq, ",checkpoint_merge");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1883) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1884) seq_puts(seq, ",nocheckpoint_merge");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1885) if (F2FS_OPTION(sbi).fsync_mode == FSYNC_MODE_POSIX)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1886) seq_printf(seq, ",fsync_mode=%s", "posix");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1887) else if (F2FS_OPTION(sbi).fsync_mode == FSYNC_MODE_STRICT)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1888) seq_printf(seq, ",fsync_mode=%s", "strict");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1889) else if (F2FS_OPTION(sbi).fsync_mode == FSYNC_MODE_NOBARRIER)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1890) seq_printf(seq, ",fsync_mode=%s", "nobarrier");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1891)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1892) #ifdef CONFIG_F2FS_FS_COMPRESSION
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1893) f2fs_show_compress_options(seq, sbi->sb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1894) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1895)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1896) if (test_opt(sbi, ATGC))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1897) seq_puts(seq, ",atgc");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1898) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1899) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1900)
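/*
 * Reset the in-memory mount options to their built-in defaults; this runs
 * before parse_options() both at initial mount and in f2fs_remount() below.
 */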
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1901) static void default_options(struct f2fs_sb_info *sbi)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1902) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1903) /* init some FS parameters */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1904) if (f2fs_sb_has_readonly(sbi))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1905) F2FS_OPTION(sbi).active_logs = NR_CURSEG_RO_TYPE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1906) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1907) F2FS_OPTION(sbi).active_logs = NR_CURSEG_PERSIST_TYPE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1908)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1909) F2FS_OPTION(sbi).inline_xattr_size = DEFAULT_INLINE_XATTR_ADDRS;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1910) F2FS_OPTION(sbi).whint_mode = WHINT_MODE_OFF;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1911) F2FS_OPTION(sbi).alloc_mode = ALLOC_MODE_DEFAULT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1912) F2FS_OPTION(sbi).fsync_mode = FSYNC_MODE_POSIX;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1913) F2FS_OPTION(sbi).s_resuid = make_kuid(&init_user_ns, F2FS_DEF_RESUID);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1914) F2FS_OPTION(sbi).s_resgid = make_kgid(&init_user_ns, F2FS_DEF_RESGID);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1915) F2FS_OPTION(sbi).compress_algorithm = COMPRESS_LZ4;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1916) F2FS_OPTION(sbi).compress_log_size = MIN_COMPRESS_LOG_SIZE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1917) F2FS_OPTION(sbi).compress_ext_cnt = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1918) F2FS_OPTION(sbi).compress_mode = COMPR_MODE_FS;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1919) F2FS_OPTION(sbi).bggc_mode = BGGC_MODE_ON;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1920)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1921) sbi->sb->s_flags &= ~SB_INLINECRYPT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1922)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1923) set_opt(sbi, INLINE_XATTR);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1924) set_opt(sbi, INLINE_DATA);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1925) set_opt(sbi, INLINE_DENTRY);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1926) set_opt(sbi, EXTENT_CACHE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1927) set_opt(sbi, NOHEAP);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1928) clear_opt(sbi, DISABLE_CHECKPOINT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1929) set_opt(sbi, MERGE_CHECKPOINT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1930) F2FS_OPTION(sbi).unusable_cap = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1931) sbi->sb->s_flags |= SB_LAZYTIME;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1932) set_opt(sbi, FLUSH_MERGE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1933) set_opt(sbi, DISCARD);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1934) if (f2fs_sb_has_blkzoned(sbi))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1935) F2FS_OPTION(sbi).fs_mode = FS_MODE_LFS;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1936) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1937) F2FS_OPTION(sbi).fs_mode = FS_MODE_ADAPTIVE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1938)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1939) #ifdef CONFIG_F2FS_FS_XATTR
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1940) set_opt(sbi, XATTR_USER);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1941) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1942) #ifdef CONFIG_F2FS_FS_POSIX_ACL
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1943) set_opt(sbi, POSIX_ACL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1944) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1945)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1946) f2fs_build_fault_attr(sbi, 0, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1947) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1948)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1949) #ifdef CONFIG_QUOTA
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1950) static int f2fs_enable_quotas(struct super_block *sb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1951) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1952)
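/*
 * checkpoint=disable: run GC until the DISABLE_TIME window expires or
 * there is nothing left to move, sync the filesystem, make sure the
 * unusable block count stays within the configured cap, then write a
 * CP_PAUSE checkpoint and set SBI_CP_DISABLED.
 */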
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1953) static int f2fs_disable_checkpoint(struct f2fs_sb_info *sbi)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1954) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1955) unsigned int s_flags = sbi->sb->s_flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1956) struct cp_control cpc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1957) int err = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1958) int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1959) block_t unusable;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1960)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1961) if (s_flags & SB_RDONLY) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1962) f2fs_err(sbi, "checkpoint=disable on readonly fs");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1963) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1964) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1965) sbi->sb->s_flags |= SB_ACTIVE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1966)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1967) f2fs_update_time(sbi, DISABLE_TIME);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1968)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1969) while (!f2fs_time_over(sbi, DISABLE_TIME)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1970) f2fs_down_write(&sbi->gc_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1971) err = f2fs_gc(sbi, true, false, false, NULL_SEGNO);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1972) if (err == -ENODATA) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1973) err = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1974) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1975) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1976) if (err && err != -EAGAIN)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1977) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1978) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1979)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1980) ret = sync_filesystem(sbi->sb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1981) if (ret || err) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1982) err = ret ? ret : err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1983) goto restore_flag;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1984) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1985)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1986) unusable = f2fs_get_unusable_blocks(sbi);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1987) if (f2fs_disable_cp_again(sbi, unusable)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1988) err = -EAGAIN;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1989) goto restore_flag;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1990) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1991)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1992) f2fs_down_write(&sbi->gc_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1993) cpc.reason = CP_PAUSE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1994) set_sbi_flag(sbi, SBI_CP_DISABLED);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1995) err = f2fs_write_checkpoint(sbi, &cpc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1996) if (err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1997) goto out_unlock;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1998)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1999) spin_lock(&sbi->stat_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2000) sbi->unusable_block_count = unusable;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2001) spin_unlock(&sbi->stat_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2002)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2003) out_unlock:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2004) f2fs_up_write(&sbi->gc_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2005) restore_flag:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2006) sbi->sb->s_flags = s_flags; /* Restore SB_RDONLY status */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2007) return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2008) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2009)
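/*
 * Re-enable checkpointing: flush dirty data, move dirty segments to the
 * prefree list, clear SBI_CP_DISABLED and issue a checkpoint.
 */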
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2010) static void f2fs_enable_checkpoint(struct f2fs_sb_info *sbi)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2011) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2012) int retry = DEFAULT_RETRY_IO_COUNT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2013)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2014) /* we should flush all the data to keep the data consistent */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2015) do {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2016) sync_inodes_sb(sbi->sb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2017) cond_resched();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2018) congestion_wait(BLK_RW_ASYNC, DEFAULT_IO_TIMEOUT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2019) } while (get_pages(sbi, F2FS_DIRTY_DATA) && retry--);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2020)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2021) if (unlikely(retry < 0))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2022) f2fs_warn(sbi, "checkpoint=enable has some unwritten data.");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2023)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2024) f2fs_down_write(&sbi->gc_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2025) f2fs_dirty_to_prefree(sbi);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2026)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2027) clear_sbi_flag(sbi, SBI_CP_DISABLED);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2028) set_sbi_flag(sbi, SBI_IS_DIRTY);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2029) f2fs_up_write(&sbi->gc_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2030)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2031) f2fs_sync_fs(sbi->sb, 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2032) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2033)
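/*
 * Handle mount -o remount: save the current options, parse the new ones,
 * start or stop the GC, checkpoint and flush_merge threads as needed, and
 * roll everything back if any step fails.
 */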
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2034) static int f2fs_remount(struct super_block *sb, int *flags, char *data)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2035) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2036) struct f2fs_sb_info *sbi = F2FS_SB(sb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2037) struct f2fs_mount_info org_mount_opt;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2038) unsigned long old_sb_flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2039) int err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2040) bool need_restart_gc = false, need_stop_gc = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2041) bool need_restart_ckpt = false, need_stop_ckpt = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2042) bool need_restart_flush = false, need_stop_flush = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2043) bool no_extent_cache = !test_opt(sbi, EXTENT_CACHE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2044) bool disable_checkpoint = test_opt(sbi, DISABLE_CHECKPOINT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2045) bool no_io_align = !F2FS_IO_ALIGNED(sbi);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2046) bool no_atgc = !test_opt(sbi, ATGC);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2047) bool no_compress_cache = !test_opt(sbi, COMPRESS_CACHE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2048) bool checkpoint_changed;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2049) #ifdef CONFIG_QUOTA
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2050) int i, j;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2051) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2052)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2053) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2054) * Save the old mount options in case we
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2055) * need to restore them.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2056) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2057) org_mount_opt = sbi->mount_opt;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2058) old_sb_flags = sb->s_flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2059)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2060) #ifdef CONFIG_QUOTA
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2061) org_mount_opt.s_jquota_fmt = F2FS_OPTION(sbi).s_jquota_fmt;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2062) for (i = 0; i < MAXQUOTAS; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2063) if (F2FS_OPTION(sbi).s_qf_names[i]) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2064) org_mount_opt.s_qf_names[i] =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2065) kstrdup(F2FS_OPTION(sbi).s_qf_names[i],
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2066) GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2067) if (!org_mount_opt.s_qf_names[i]) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2068) for (j = 0; j < i; j++)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2069) kfree(org_mount_opt.s_qf_names[j]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2070) return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2071) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2072) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2073) org_mount_opt.s_qf_names[i] = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2074) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2075) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2076) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2077)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2078) /* recover superblocks we couldn't write due to previous RO mount */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2079) if (!(*flags & SB_RDONLY) && is_sbi_flag_set(sbi, SBI_NEED_SB_WRITE)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2080) err = f2fs_commit_super(sbi, false);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2081) f2fs_info(sbi, "Try to recover all the superblocks, ret: %d",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2082) err);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2083) if (!err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2084) clear_sbi_flag(sbi, SBI_NEED_SB_WRITE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2085) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2086)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2087) default_options(sbi);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2088)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2089) /* parse mount options */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2090) err = parse_options(sb, data, true);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2091) if (err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2092) goto restore_opts;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2093) checkpoint_changed =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2094) disable_checkpoint != test_opt(sbi, DISABLE_CHECKPOINT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2095)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2096) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2097) * The previous and new states of the filesystem are both RO,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2098) * so skip checking the GC and FLUSH_MERGE conditions.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2099) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2100) if (f2fs_readonly(sb) && (*flags & SB_RDONLY))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2101) goto skip;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2102)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2103) if (f2fs_sb_has_readonly(sbi) && !(*flags & SB_RDONLY)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2104) err = -EROFS;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2105) goto restore_opts;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2106) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2107)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2108) #ifdef CONFIG_QUOTA
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2109) if (!f2fs_readonly(sb) && (*flags & SB_RDONLY)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2110) err = dquot_suspend(sb, -1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2111) if (err < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2112) goto restore_opts;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2113) } else if (f2fs_readonly(sb) && !(*flags & SB_RDONLY)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2114) /* dquot_resume needs RW */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2115) sb->s_flags &= ~SB_RDONLY;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2116) if (sb_any_quota_suspended(sb)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2117) dquot_resume(sb, -1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2118) } else if (f2fs_sb_has_quota_ino(sbi)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2119) err = f2fs_enable_quotas(sb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2120) if (err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2121) goto restore_opts;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2122) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2123) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2124) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2125) /* disallow enabling atgc dynamically */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2126) if (no_atgc == !!test_opt(sbi, ATGC)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2127) err = -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2128) f2fs_warn(sbi, "switch atgc option is not allowed");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2129) goto restore_opts;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2130) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2131)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2132) /* disallow enabling/disabling extent_cache dynamically */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2133) if (no_extent_cache == !!test_opt(sbi, EXTENT_CACHE)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2134) err = -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2135) f2fs_warn(sbi, "switch extent_cache option is not allowed");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2136) goto restore_opts;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2137) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2138)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2139) if (no_io_align == !!F2FS_IO_ALIGNED(sbi)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2140) err = -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2141) f2fs_warn(sbi, "switch io_bits option is not allowed");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2142) goto restore_opts;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2143) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2144)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2145) if (no_compress_cache == !!test_opt(sbi, COMPRESS_CACHE)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2146) err = -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2147) f2fs_warn(sbi, "switch compress_cache option is not allowed");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2148) goto restore_opts;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2149) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2150)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2151) if ((*flags & SB_RDONLY) && test_opt(sbi, DISABLE_CHECKPOINT)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2152) err = -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2153) f2fs_warn(sbi, "disabling checkpoint not compatible with read-only");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2154) goto restore_opts;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2155) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2156)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2157) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2158) * Stop the GC thread if the FS is mounted read-only or if
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2159) * background_gc=off is passed as a mount option. Also sync
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2160) * the filesystem.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2161) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2162) if ((*flags & SB_RDONLY) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2163) (F2FS_OPTION(sbi).bggc_mode == BGGC_MODE_OFF &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2164) !test_opt(sbi, GC_MERGE))) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2165) if (sbi->gc_thread) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2166) f2fs_stop_gc_thread(sbi);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2167) need_restart_gc = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2168) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2169) } else if (!sbi->gc_thread) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2170) err = f2fs_start_gc_thread(sbi);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2171) if (err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2172) goto restore_opts;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2173) need_stop_gc = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2174) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2175)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2176) if (*flags & SB_RDONLY ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2177) F2FS_OPTION(sbi).whint_mode != org_mount_opt.whint_mode) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2178) sync_inodes_sb(sb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2179)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2180) set_sbi_flag(sbi, SBI_IS_DIRTY);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2181) set_sbi_flag(sbi, SBI_IS_CLOSE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2182) f2fs_sync_fs(sb, 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2183) clear_sbi_flag(sbi, SBI_IS_CLOSE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2184) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2185)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2186) if ((*flags & SB_RDONLY) || test_opt(sbi, DISABLE_CHECKPOINT) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2187) !test_opt(sbi, MERGE_CHECKPOINT)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2188) f2fs_stop_ckpt_thread(sbi);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2189) need_restart_ckpt = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2190) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2191) err = f2fs_start_ckpt_thread(sbi);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2192) if (err) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2193) f2fs_err(sbi,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2194) "Failed to start F2FS issue_checkpoint_thread (%d)",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2195) err);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2196) goto restore_gc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2197) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2198) need_stop_ckpt = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2199) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2200)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2201) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2202) * Stop the issue_flush thread if the FS is mounted read-only
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2203) * or if flush_merge is not passed as a mount option.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2204) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2205) if ((*flags & SB_RDONLY) || !test_opt(sbi, FLUSH_MERGE)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2206) clear_opt(sbi, FLUSH_MERGE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2207) f2fs_destroy_flush_cmd_control(sbi, false);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2208) need_restart_flush = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2209) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2210) err = f2fs_create_flush_cmd_control(sbi);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2211) if (err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2212) goto restore_ckpt;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2213) need_stop_flush = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2214) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2215)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2216) if (checkpoint_changed) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2217) if (test_opt(sbi, DISABLE_CHECKPOINT)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2218) err = f2fs_disable_checkpoint(sbi);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2219) if (err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2220) goto restore_flush;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2221) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2222) f2fs_enable_checkpoint(sbi);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2223) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2224) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2225)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2226) skip:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2227) #ifdef CONFIG_QUOTA
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2228) /* Release old quota file names */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2229) for (i = 0; i < MAXQUOTAS; i++)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2230) kfree(org_mount_opt.s_qf_names[i]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2231) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2232) /* Update the SB_POSIXACL flag */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2233) sb->s_flags = (sb->s_flags & ~SB_POSIXACL) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2234) (test_opt(sbi, POSIX_ACL) ? SB_POSIXACL : 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2235)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2236) limit_reserve_root(sbi);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2237) adjust_unusable_cap_perc(sbi);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2238) *flags = (*flags & ~SB_LAZYTIME) | (sb->s_flags & SB_LAZYTIME);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2239) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2240) restore_flush:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2241) if (need_restart_flush) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2242) if (f2fs_create_flush_cmd_control(sbi))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2243) f2fs_warn(sbi, "background flush thread has stopped");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2244) } else if (need_stop_flush) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2245) clear_opt(sbi, FLUSH_MERGE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2246) f2fs_destroy_flush_cmd_control(sbi, false);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2247) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2248) restore_ckpt:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2249) if (need_restart_ckpt) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2250) if (f2fs_start_ckpt_thread(sbi))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2251) f2fs_warn(sbi, "background ckpt thread has stopped");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2252) } else if (need_stop_ckpt) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2253) f2fs_stop_ckpt_thread(sbi);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2254) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2255) restore_gc:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2256) if (need_restart_gc) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2257) if (f2fs_start_gc_thread(sbi))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2258) f2fs_warn(sbi, "background gc thread has stopped");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2259) } else if (need_stop_gc) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2260) f2fs_stop_gc_thread(sbi);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2261) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2262) restore_opts:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2263) #ifdef CONFIG_QUOTA
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2264) F2FS_OPTION(sbi).s_jquota_fmt = org_mount_opt.s_jquota_fmt;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2265) for (i = 0; i < MAXQUOTAS; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2266) kfree(F2FS_OPTION(sbi).s_qf_names[i]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2267) F2FS_OPTION(sbi).s_qf_names[i] = org_mount_opt.s_qf_names[i];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2268) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2269) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2270) sbi->mount_opt = org_mount_opt;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2271) sb->s_flags = old_sb_flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2272) return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2273) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2274)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2275) #ifdef CONFIG_QUOTA
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2276) /* Read data from quotafile */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2277) static ssize_t f2fs_quota_read(struct super_block *sb, int type, char *data,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2278) size_t len, loff_t off)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2279) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2280) struct inode *inode = sb_dqopt(sb)->files[type];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2281) struct address_space *mapping = inode->i_mapping;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2282) block_t blkidx = F2FS_BYTES_TO_BLK(off);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2283) int offset = off & (sb->s_blocksize - 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2284) int tocopy;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2285) size_t toread;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2286) loff_t i_size = i_size_read(inode);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2287) struct page *page;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2288) char *kaddr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2289)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2290) if (off > i_size)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2291) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2292)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2293) if (off + len > i_size)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2294) len = i_size - off;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2295) toread = len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2296) while (toread > 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2297) tocopy = min_t(unsigned long, sb->s_blocksize - offset, toread);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2298) repeat:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2299) page = read_cache_page_gfp(mapping, blkidx, GFP_NOFS);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2300) if (IS_ERR(page)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2301) if (PTR_ERR(page) == -ENOMEM) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2302) congestion_wait(BLK_RW_ASYNC,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2303) DEFAULT_IO_TIMEOUT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2304) goto repeat;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2305) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2306) set_sbi_flag(F2FS_SB(sb), SBI_QUOTA_NEED_REPAIR);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2307) return PTR_ERR(page);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2308) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2309)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2310) lock_page(page);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2311)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2312) if (unlikely(page->mapping != mapping)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2313) f2fs_put_page(page, 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2314) goto repeat;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2315) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2316) if (unlikely(!PageUptodate(page))) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2317) f2fs_put_page(page, 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2318) set_sbi_flag(F2FS_SB(sb), SBI_QUOTA_NEED_REPAIR);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2319) return -EIO;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2320) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2321)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2322) kaddr = kmap_atomic(page);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2323) memcpy(data, kaddr + offset, tocopy);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2324) kunmap_atomic(kaddr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2325) f2fs_put_page(page, 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2326)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2327) offset = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2328) toread -= tocopy;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2329) data += tocopy;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2330) blkidx++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2331) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2332) return len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2333) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2334)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2335) /* Write to quotafile */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2336) static ssize_t f2fs_quota_write(struct super_block *sb, int type,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2337) const char *data, size_t len, loff_t off)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2338) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2339) struct inode *inode = sb_dqopt(sb)->files[type];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2340) struct address_space *mapping = inode->i_mapping;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2341) const struct address_space_operations *a_ops = mapping->a_ops;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2342) int offset = off & (sb->s_blocksize - 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2343) size_t towrite = len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2344) struct page *page;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2345) void *fsdata = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2346) char *kaddr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2347) int err = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2348) int tocopy;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2349)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2350) while (towrite > 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2351) tocopy = min_t(unsigned long, sb->s_blocksize - offset,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2352) towrite);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2353) retry:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2354) err = a_ops->write_begin(NULL, mapping, off, tocopy, 0,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2355) &page, &fsdata);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2356) if (unlikely(err)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2357) if (err == -ENOMEM) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2358) congestion_wait(BLK_RW_ASYNC,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2359) DEFAULT_IO_TIMEOUT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2360) goto retry;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2361) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2362) set_sbi_flag(F2FS_SB(sb), SBI_QUOTA_NEED_REPAIR);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2363) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2364) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2365)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2366) kaddr = kmap_atomic(page);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2367) memcpy(kaddr + offset, data, tocopy);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2368) kunmap_atomic(kaddr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2369) flush_dcache_page(page);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2370)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2371) a_ops->write_end(NULL, mapping, off, tocopy, tocopy,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2372) page, fsdata);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2373) offset = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2374) towrite -= tocopy;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2375) off += tocopy;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2376) data += tocopy;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2377) cond_resched();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2378) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2379)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2380) if (len == towrite)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2381) return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2382) inode->i_mtime = inode->i_ctime = current_time(inode);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2383) f2fs_mark_inode_dirty_sync(inode, false);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2384) return len - towrite;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2385) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2386)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2387) static struct dquot **f2fs_get_dquots(struct inode *inode)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2388) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2389) return F2FS_I(inode)->i_dquot;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2390) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2391)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2392) static qsize_t *f2fs_get_reserved_space(struct inode *inode)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2393) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2394) return &F2FS_I(inode)->i_reserved_quota;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2395) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2396)
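/*
 * Turn on journalled quota for @type using the quota file name given in
 * the mount options, unless the checkpoint flags indicate the quota file
 * may be corrupted (CP_QUOTA_NEED_FSCK_FLAG).
 */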
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2397) static int f2fs_quota_on_mount(struct f2fs_sb_info *sbi, int type)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2398) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2399) if (is_set_ckpt_flags(sbi, CP_QUOTA_NEED_FSCK_FLAG)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2400) f2fs_err(sbi, "quota sysfile may be corrupted, skip loading it");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2401) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2402) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2403)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2404) return dquot_quota_on_mount(sbi->sb, F2FS_OPTION(sbi).s_qf_names[type],
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2405) F2FS_OPTION(sbi).s_jquota_fmt, type);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2406) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2407)
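/*
 * Turn on quota accounting: with the quota_ino feature on a read-only
 * mount, enable the hidden quota inodes; otherwise enable each journalled
 * quota file named in the mount options. Returns 1 when quotas were
 * enabled, 0 otherwise.
 */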
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2408) int f2fs_enable_quota_files(struct f2fs_sb_info *sbi, bool rdonly)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2409) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2410) int enabled = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2411) int i, err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2412)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2413) if (f2fs_sb_has_quota_ino(sbi) && rdonly) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2414) err = f2fs_enable_quotas(sbi->sb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2415) if (err) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2416) f2fs_err(sbi, "Cannot turn on quota_ino: %d", err);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2417) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2418) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2419) return 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2420) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2421)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2422) for (i = 0; i < MAXQUOTAS; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2423) if (F2FS_OPTION(sbi).s_qf_names[i]) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2424) err = f2fs_quota_on_mount(sbi, i);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2425) if (!err) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2426) enabled = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2427) continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2428) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2429) f2fs_err(sbi, "Cannot turn on quotas: %d on %d",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2430) err, i);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2431) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2432) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2433) return enabled;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2434) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2435)
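/*
 * Look up the internal quota inode for @type (quota_ino feature) and
 * enable quota on it via dquot_load_quota_inode().
 */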
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2436) static int f2fs_quota_enable(struct super_block *sb, int type, int format_id,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2437) unsigned int flags)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2438) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2439) struct inode *qf_inode;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2440) unsigned long qf_inum;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2441) int err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2442)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2443) BUG_ON(!f2fs_sb_has_quota_ino(F2FS_SB(sb)));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2444)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2445) qf_inum = f2fs_qf_ino(sb, type);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2446) if (!qf_inum)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2447) return -EPERM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2448)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2449) qf_inode = f2fs_iget(sb, qf_inum);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2450) if (IS_ERR(qf_inode)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2451) f2fs_err(F2FS_SB(sb), "Bad quota inode %u:%lu", type, qf_inum);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2452) return PTR_ERR(qf_inode);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2453) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2454)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2455) /* Don't account quota for quota files to avoid recursion */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2456) qf_inode->i_flags |= S_NOQUOTA;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2457) err = dquot_load_quota_inode(qf_inode, type, format_id, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2458) iput(qf_inode);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2459) return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2460) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2461)
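/*
 * Enable usage tracking (and limits, when the matching mount option is
 * set) on every internal quota inode recorded in the superblock.  On
 * failure, quotas that were already enabled are turned off again and
 * SBI_QUOTA_NEED_REPAIR is set.
 */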
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2462) static int f2fs_enable_quotas(struct super_block *sb)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2463) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2464) struct f2fs_sb_info *sbi = F2FS_SB(sb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2465) int type, err = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2466) unsigned long qf_inum;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2467) bool quota_mopt[MAXQUOTAS] = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2468) test_opt(sbi, USRQUOTA),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2469) test_opt(sbi, GRPQUOTA),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2470) test_opt(sbi, PRJQUOTA),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2471) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2472)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2473) if (is_set_ckpt_flags(F2FS_SB(sb), CP_QUOTA_NEED_FSCK_FLAG)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2474) f2fs_err(sbi, "quota file may be corrupted, skip loading it");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2475) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2476) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2477)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2478) sb_dqopt(sb)->flags |= DQUOT_QUOTA_SYS_FILE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2479)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2480) for (type = 0; type < MAXQUOTAS; type++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2481) qf_inum = f2fs_qf_ino(sb, type);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2482) if (qf_inum) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2483) err = f2fs_quota_enable(sb, type, QFMT_VFS_V1,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2484) DQUOT_USAGE_ENABLED |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2485) (quota_mopt[type] ? DQUOT_LIMITS_ENABLED : 0));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2486) if (err) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2487) f2fs_err(sbi, "Failed to enable quota tracking (type=%d, err=%d). Please run fsck to fix.",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2488) type, err);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2489) for (type--; type >= 0; type--)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2490) dquot_quota_off(sb, type);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2491) set_sbi_flag(F2FS_SB(sb),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2492) SBI_QUOTA_NEED_REPAIR);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2493) return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2494) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2495) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2496) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2497) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2498) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2499)
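/*
 * Write back the in-memory dquots of @type and start writeback of the
 * quota file's dirty pages.  For journalled quota the page cache is
 * kept; otherwise wait for writeback and drop the cached pages.  Any
 * failure marks the filesystem with SBI_QUOTA_NEED_REPAIR.
 */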
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2500) static int f2fs_quota_sync_file(struct f2fs_sb_info *sbi, int type)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2501) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2502) struct quota_info *dqopt = sb_dqopt(sbi->sb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2503) struct address_space *mapping = dqopt->files[type]->i_mapping;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2504) int ret = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2505)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2506) ret = dquot_writeback_dquots(sbi->sb, type);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2507) if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2508) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2509)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2510) ret = filemap_fdatawrite(mapping);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2511) if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2512) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2513)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2514) /* if we are using journalled quota, skip the wait and keep the pagecache */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2515) if (is_journalled_quota(sbi))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2516) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2517)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2518) ret = filemap_fdatawait(mapping);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2519)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2520) truncate_inode_pages(&dqopt->files[type]->i_data, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2521) out:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2522) if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2523) set_sbi_flag(sbi, SBI_QUOTA_NEED_REPAIR);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2524) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2525) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2526)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2527) int f2fs_quota_sync(struct super_block *sb, int type)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2528) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2529) struct f2fs_sb_info *sbi = F2FS_SB(sb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2530) struct quota_info *dqopt = sb_dqopt(sb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2531) int cnt;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2532) int ret = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2533)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2534) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2535) * Now that everything is written we can discard the pagecache so
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2536) * that userspace sees the changes.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2537) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2538) for (cnt = 0; cnt < MAXQUOTAS; cnt++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2539)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2540) if (type != -1 && cnt != type)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2541) continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2542)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2543) if (!sb_has_quota_active(sb, cnt))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2544) continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2545)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2546) inode_lock(dqopt->files[cnt]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2547)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2548) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2549) * do_quotactl
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2550) * f2fs_quota_sync
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2551) * f2fs_down_read(quota_sem)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2552) * dquot_writeback_dquots()
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2553) * f2fs_dquot_commit
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2554) * block_operation
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2555) * f2fs_down_read(quota_sem)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2556) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2557) f2fs_lock_op(sbi);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2558) f2fs_down_read(&sbi->quota_sem);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2559)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2560) ret = f2fs_quota_sync_file(sbi, cnt);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2561)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2562) f2fs_up_read(&sbi->quota_sem);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2563) f2fs_unlock_op(sbi);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2564)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2565) inode_unlock(dqopt->files[cnt]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2566)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2567) if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2568) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2569) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2570) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2571) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2572)
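/*
 * Enable quota via quotactl on an explicitly given quota file: refused
 * with -EBUSY when the internal quota sysfile feature is in use;
 * otherwise quota is synced, dquot_quota_on() is called, and the quota
 * file is marked NOATIME and IMMUTABLE.
 */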
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2573) static int f2fs_quota_on(struct super_block *sb, int type, int format_id,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2574) const struct path *path)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2575) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2576) struct inode *inode;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2577) int err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2578)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2579) /* if a quota sysfile exists, deny enabling quota with a specific file */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2580) if (f2fs_sb_has_quota_ino(F2FS_SB(sb))) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2581) f2fs_err(F2FS_SB(sb), "quota sysfile already exists");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2582) return -EBUSY;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2583) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2584)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2585) err = f2fs_quota_sync(sb, type);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2586) if (err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2587) return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2588)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2589) err = dquot_quota_on(sb, type, format_id, path);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2590) if (err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2591) return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2592)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2593) inode = d_inode(path->dentry);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2594)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2595) inode_lock(inode);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2596) F2FS_I(inode)->i_flags |= F2FS_NOATIME_FL | F2FS_IMMUTABLE_FL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2597) f2fs_set_inode_flags(inode);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2598) inode_unlock(inode);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2599) f2fs_mark_inode_dirty_sync(inode, false);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2600)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2601) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2602) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2603)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2604) static int __f2fs_quota_off(struct super_block *sb, int type)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2605) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2606) struct inode *inode = sb_dqopt(sb)->files[type];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2607) int err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2608)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2609) if (!inode || !igrab(inode))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2610) return dquot_quota_off(sb, type);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2611)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2612) err = f2fs_quota_sync(sb, type);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2613) if (err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2614) goto out_put;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2615)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2616) err = dquot_quota_off(sb, type);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2617) if (err || f2fs_sb_has_quota_ino(F2FS_SB(sb)))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2618) goto out_put;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2619)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2620) inode_lock(inode);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2621) F2FS_I(inode)->i_flags &= ~(F2FS_NOATIME_FL | F2FS_IMMUTABLE_FL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2622) f2fs_set_inode_flags(inode);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2623) inode_unlock(inode);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2624) f2fs_mark_inode_dirty_sync(inode, false);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2625) out_put:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2626) iput(inode);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2627) return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2628) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2629)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2630) static int f2fs_quota_off(struct super_block *sb, int type)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2631) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2632) struct f2fs_sb_info *sbi = F2FS_SB(sb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2633) int err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2634)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2635) err = __f2fs_quota_off(sb, type);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2636)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2637) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2638) * quotactl can shut down journalled quota, and subsequent updates can
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2639) * then leave the quota records inconsistent with the fs data, so tag
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2640) * the flag to let fsck be aware of it.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2641) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2642) if (is_journalled_quota(sbi))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2643) set_sbi_flag(sbi, SBI_QUOTA_NEED_REPAIR);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2644) return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2645) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2646)
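/*
 * Turn off quotas of all types at umount.  If __f2fs_quota_off() fails,
 * fall back to dquot_quota_off(), log the error, and set
 * SBI_QUOTA_NEED_REPAIR.  The filesystem is synced afterwards (see the
 * comment below).
 */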
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2647) void f2fs_quota_off_umount(struct super_block *sb)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2648) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2649) int type;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2650) int err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2651)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2652) for (type = 0; type < MAXQUOTAS; type++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2653) err = __f2fs_quota_off(sb, type);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2654) if (err) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2655) int ret = dquot_quota_off(sb, type);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2656)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2657) f2fs_err(F2FS_SB(sb), "Fail to turn off disk quota (type: %d, err: %d, ret:%d), Please run fsck to fix it.",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2658) type, err, ret);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2659) set_sbi_flag(F2FS_SB(sb), SBI_QUOTA_NEED_REPAIR);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2660) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2661) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2662) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2663) * In case of checkpoint=disable, we must flush quota blocks here;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2664) * otherwise they may be written back after put_super has already
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2665) * dropped node_inode, causing a NULL pointer dereference in end_io.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2666) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2667) sync_filesystem(sb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2668) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2669)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2670) static void f2fs_truncate_quota_inode_pages(struct super_block *sb)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2671) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2672) struct quota_info *dqopt = sb_dqopt(sb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2673) int type;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2674)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2675) for (type = 0; type < MAXQUOTAS; type++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2676) if (!dqopt->files[type])
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2677) continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2678) f2fs_inode_synced(dqopt->files[type]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2679) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2680) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2681)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2682) static int f2fs_dquot_commit(struct dquot *dquot)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2683) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2684) struct f2fs_sb_info *sbi = F2FS_SB(dquot->dq_sb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2685) int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2686)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2687) f2fs_down_read_nested(&sbi->quota_sem, SINGLE_DEPTH_NESTING);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2688) ret = dquot_commit(dquot);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2689) if (ret < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2690) set_sbi_flag(sbi, SBI_QUOTA_NEED_REPAIR);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2691) f2fs_up_read(&sbi->quota_sem);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2692) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2693) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2694)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2695) static int f2fs_dquot_acquire(struct dquot *dquot)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2696) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2697) struct f2fs_sb_info *sbi = F2FS_SB(dquot->dq_sb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2698) int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2699)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2700) f2fs_down_read(&sbi->quota_sem);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2701) ret = dquot_acquire(dquot);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2702) if (ret < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2703) set_sbi_flag(sbi, SBI_QUOTA_NEED_REPAIR);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2704) f2fs_up_read(&sbi->quota_sem);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2705) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2706) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2707)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2708) static int f2fs_dquot_release(struct dquot *dquot)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2709) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2710) struct f2fs_sb_info *sbi = F2FS_SB(dquot->dq_sb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2711) int ret = dquot_release(dquot);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2712)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2713) if (ret < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2714) set_sbi_flag(sbi, SBI_QUOTA_NEED_REPAIR);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2715) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2716) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2717)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2718) static int f2fs_dquot_mark_dquot_dirty(struct dquot *dquot)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2719) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2720) struct super_block *sb = dquot->dq_sb;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2721) struct f2fs_sb_info *sbi = F2FS_SB(sb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2722) int ret = dquot_mark_dquot_dirty(dquot);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2723)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2724) /* if we are using journalled quota */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2725) if (is_journalled_quota(sbi))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2726) set_sbi_flag(sbi, SBI_QUOTA_NEED_FLUSH);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2727)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2728) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2729) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2730)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2731) static int f2fs_dquot_commit_info(struct super_block *sb, int type)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2732) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2733) struct f2fs_sb_info *sbi = F2FS_SB(sb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2734) int ret = dquot_commit_info(sb, type);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2735)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2736) if (ret < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2737) set_sbi_flag(sbi, SBI_QUOTA_NEED_REPAIR);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2738) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2739) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2740)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2741) static int f2fs_get_projid(struct inode *inode, kprojid_t *projid)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2742) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2743) *projid = F2FS_I(inode)->i_projid;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2744) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2745) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2746)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2747) static const struct dquot_operations f2fs_quota_operations = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2748) .get_reserved_space = f2fs_get_reserved_space,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2749) .write_dquot = f2fs_dquot_commit,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2750) .acquire_dquot = f2fs_dquot_acquire,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2751) .release_dquot = f2fs_dquot_release,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2752) .mark_dirty = f2fs_dquot_mark_dquot_dirty,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2753) .write_info = f2fs_dquot_commit_info,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2754) .alloc_dquot = dquot_alloc,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2755) .destroy_dquot = dquot_destroy,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2756) .get_projid = f2fs_get_projid,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2757) .get_next_id = dquot_get_next_id,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2758) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2759)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2760) static const struct quotactl_ops f2fs_quotactl_ops = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2761) .quota_on = f2fs_quota_on,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2762) .quota_off = f2fs_quota_off,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2763) .quota_sync = f2fs_quota_sync,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2764) .get_state = dquot_get_state,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2765) .set_info = dquot_set_dqinfo,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2766) .get_dqblk = dquot_get_dqblk,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2767) .set_dqblk = dquot_set_dqblk,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2768) .get_nextdqblk = dquot_get_next_dqblk,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2769) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2770) #else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2771) int f2fs_quota_sync(struct super_block *sb, int type)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2772) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2773) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2774) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2775)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2776) void f2fs_quota_off_umount(struct super_block *sb)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2777) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2778) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2779) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2780)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2781) static const struct super_operations f2fs_sops = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2782) .alloc_inode = f2fs_alloc_inode,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2783) .free_inode = f2fs_free_inode,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2784) .drop_inode = f2fs_drop_inode,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2785) .write_inode = f2fs_write_inode,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2786) .dirty_inode = f2fs_dirty_inode,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2787) .show_options = f2fs_show_options,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2788) #ifdef CONFIG_QUOTA
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2789) .quota_read = f2fs_quota_read,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2790) .quota_write = f2fs_quota_write,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2791) .get_dquots = f2fs_get_dquots,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2792) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2793) .evict_inode = f2fs_evict_inode,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2794) .put_super = f2fs_put_super,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2795) .sync_fs = f2fs_sync_fs,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2796) .freeze_fs = f2fs_freeze,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2797) .unfreeze_fs = f2fs_unfreeze,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2798) .statfs = f2fs_statfs,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2799) .remount_fs = f2fs_remount,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2800) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2801)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2802) #ifdef CONFIG_FS_ENCRYPTION
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2803) static int f2fs_get_context(struct inode *inode, void *ctx, size_t len)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2804) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2805) return f2fs_getxattr(inode, F2FS_XATTR_INDEX_ENCRYPTION,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2806) F2FS_XATTR_NAME_ENCRYPTION_CONTEXT,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2807) ctx, len, NULL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2808) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2809)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2810) static int f2fs_set_context(struct inode *inode, const void *ctx, size_t len,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2811) void *fs_data)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2812) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2813) struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2814)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2815) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2816) * Encrypting the root directory is not allowed because fsck
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2817) * expects lost+found directory to exist and remain unencrypted
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2818) * if LOST_FOUND feature is enabled.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2820) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2821) if (f2fs_sb_has_lost_found(sbi) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2822) inode->i_ino == F2FS_ROOT_INO(sbi))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2823) return -EPERM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2824)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2825) return f2fs_setxattr(inode, F2FS_XATTR_INDEX_ENCRYPTION,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2826) F2FS_XATTR_NAME_ENCRYPTION_CONTEXT,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2827) ctx, len, fs_data, XATTR_CREATE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2828) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2829)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2830) static const union fscrypt_policy *f2fs_get_dummy_policy(struct super_block *sb)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2831) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2832) return F2FS_OPTION(F2FS_SB(sb)).dummy_enc_policy.policy;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2833) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2834)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2835) static bool f2fs_has_stable_inodes(struct super_block *sb)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2836) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2837) return true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2838) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2839)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2840) static void f2fs_get_ino_and_lblk_bits(struct super_block *sb,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2841) int *ino_bits_ret, int *lblk_bits_ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2842) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2843) *ino_bits_ret = 8 * sizeof(nid_t);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2844) *lblk_bits_ret = 8 * sizeof(block_t);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2845) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2846)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2847) static int f2fs_get_num_devices(struct super_block *sb)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2848) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2849) struct f2fs_sb_info *sbi = F2FS_SB(sb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2850)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2851) if (f2fs_is_multi_device(sbi))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2852) return sbi->s_ndevs;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2853) return 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2854) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2855)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2856) static void f2fs_get_devices(struct super_block *sb,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2857) struct request_queue **devs)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2858) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2859) struct f2fs_sb_info *sbi = F2FS_SB(sb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2860) int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2861)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2862) for (i = 0; i < sbi->s_ndevs; i++)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2863) devs[i] = bdev_get_queue(FDEV(i).bdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2864) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2865)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2866) static const struct fscrypt_operations f2fs_cryptops = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2867) .key_prefix = "f2fs:",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2868) .get_context = f2fs_get_context,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2869) .set_context = f2fs_set_context,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2870) .get_dummy_policy = f2fs_get_dummy_policy,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2871) .empty_dir = f2fs_empty_dir,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2872) .max_namelen = F2FS_NAME_LEN,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2873) .has_stable_inodes = f2fs_has_stable_inodes,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2874) .get_ino_and_lblk_bits = f2fs_get_ino_and_lblk_bits,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2875) .get_num_devices = f2fs_get_num_devices,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2876) .get_devices = f2fs_get_devices,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2877) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2878) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2879)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2880) static struct inode *f2fs_nfs_get_inode(struct super_block *sb,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2881) u64 ino, u32 generation)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2882) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2883) struct f2fs_sb_info *sbi = F2FS_SB(sb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2884) struct inode *inode;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2885)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2886) if (f2fs_check_nid_range(sbi, ino))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2887) return ERR_PTR(-ESTALE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2888)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2889) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2890) * f2fs_iget isn't quite right if the inode is currently unallocated!
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2891) * However, f2fs_iget currently does appropriate checks to handle stale
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2892) * inodes so everything is OK.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2893) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2894) inode = f2fs_iget(sb, ino);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2895) if (IS_ERR(inode))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2896) return ERR_CAST(inode);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2897) if (unlikely(generation && inode->i_generation != generation)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2898) /* we didn't find the right inode. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2899) iput(inode);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2900) return ERR_PTR(-ESTALE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2901) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2902) return inode;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2903) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2904)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2905) static struct dentry *f2fs_fh_to_dentry(struct super_block *sb, struct fid *fid,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2906) int fh_len, int fh_type)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2907) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2908) return generic_fh_to_dentry(sb, fid, fh_len, fh_type,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2909) f2fs_nfs_get_inode);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2910) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2911)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2912) static struct dentry *f2fs_fh_to_parent(struct super_block *sb, struct fid *fid,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2913) int fh_len, int fh_type)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2914) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2915) return generic_fh_to_parent(sb, fid, fh_len, fh_type,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2916) f2fs_nfs_get_inode);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2917) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2918)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2919) static const struct export_operations f2fs_export_ops = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2920) .fh_to_dentry = f2fs_fh_to_dentry,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2921) .fh_to_parent = f2fs_fh_to_parent,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2922) .get_parent = f2fs_get_parent,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2923) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2924)
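/*
 * Maximum number of blocks addressable from an inode: two direct node
 * blocks, two indirect node blocks and one double indirect node block.
 * Blocks mapped directly from i_addr are intentionally not counted; see
 * the note below.
 */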
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2925) loff_t max_file_blocks(struct inode *inode)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2926) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2927) loff_t result = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2928) loff_t leaf_count;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2929)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2930) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2931) * note: previously, result was equal to (DEF_ADDRS_PER_INODE -
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2932) * DEFAULT_INLINE_XATTR_ADDRS), but now f2fs tries to reserve more
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2933) * space in inode.i_addr, so it is safer to initialize
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2934) * result to zero.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2935) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2936)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2937) if (inode && f2fs_compressed_file(inode))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2938) leaf_count = ADDRS_PER_BLOCK(inode);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2939) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2940) leaf_count = DEF_ADDRS_PER_BLOCK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2941)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2942) /* two direct node blocks */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2943) result += (leaf_count * 2);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2944)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2945) /* two indirect node blocks */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2946) leaf_count *= NIDS_PER_BLOCK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2947) result += (leaf_count * 2);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2948)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2949) /* one double indirect node block */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2950) leaf_count *= NIDS_PER_BLOCK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2951) result += leaf_count;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2952)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2953) return result;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2954) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2955)
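/*
 * Write one superblock buffer to disk.  If @super is given, copy it into
 * the buffer first.  The write is issued synchronously with preflush and
 * FUA.
 */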
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2956) static int __f2fs_commit_super(struct buffer_head *bh,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2957) struct f2fs_super_block *super)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2958) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2959) lock_buffer(bh);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2960) if (super)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2961) memcpy(bh->b_data + F2FS_SUPER_OFFSET, super, sizeof(*super));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2962) set_buffer_dirty(bh);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2963) unlock_buffer(bh);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2964)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2965) /* it's a rare case, so we can use FUA all the time */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2966) return __sync_dirty_buffer(bh, REQ_SYNC | REQ_PREFLUSH | REQ_FUA);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2967) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2968)
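/*
 * Validate that the CP/SIT/NAT/SSA/MAIN areas recorded in the raw
 * superblock are laid out back to back; returns true on inconsistency.
 * A MAIN area that ends before the last segment is fixed up instead:
 * segment_count is shrunk in memory and, when the device is writable,
 * written back to the superblock.
 */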
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2969) static inline bool sanity_check_area_boundary(struct f2fs_sb_info *sbi,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2970) struct buffer_head *bh)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2971) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2972) struct f2fs_super_block *raw_super = (struct f2fs_super_block *)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2973) (bh->b_data + F2FS_SUPER_OFFSET);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2974) struct super_block *sb = sbi->sb;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2975) u32 segment0_blkaddr = le32_to_cpu(raw_super->segment0_blkaddr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2976) u32 cp_blkaddr = le32_to_cpu(raw_super->cp_blkaddr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2977) u32 sit_blkaddr = le32_to_cpu(raw_super->sit_blkaddr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2978) u32 nat_blkaddr = le32_to_cpu(raw_super->nat_blkaddr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2979) u32 ssa_blkaddr = le32_to_cpu(raw_super->ssa_blkaddr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2980) u32 main_blkaddr = le32_to_cpu(raw_super->main_blkaddr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2981) u32 segment_count_ckpt = le32_to_cpu(raw_super->segment_count_ckpt);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2982) u32 segment_count_sit = le32_to_cpu(raw_super->segment_count_sit);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2983) u32 segment_count_nat = le32_to_cpu(raw_super->segment_count_nat);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2984) u32 segment_count_ssa = le32_to_cpu(raw_super->segment_count_ssa);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2985) u32 segment_count_main = le32_to_cpu(raw_super->segment_count_main);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2986) u32 segment_count = le32_to_cpu(raw_super->segment_count);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2987) u32 log_blocks_per_seg = le32_to_cpu(raw_super->log_blocks_per_seg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2988) u64 main_end_blkaddr = main_blkaddr +
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2989) (segment_count_main << log_blocks_per_seg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2990) u64 seg_end_blkaddr = segment0_blkaddr +
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2991) (segment_count << log_blocks_per_seg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2992)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2993) if (segment0_blkaddr != cp_blkaddr) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2994) f2fs_info(sbi, "Mismatch start address, segment0(%u) cp_blkaddr(%u)",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2995) segment0_blkaddr, cp_blkaddr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2996) return true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2997) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2998)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2999) if (cp_blkaddr + (segment_count_ckpt << log_blocks_per_seg) !=
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3000) sit_blkaddr) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3001) f2fs_info(sbi, "Wrong CP boundary, start(%u) end(%u) blocks(%u)",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3002) cp_blkaddr, sit_blkaddr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3003) segment_count_ckpt << log_blocks_per_seg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3004) return true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3005) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3006)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3007) if (sit_blkaddr + (segment_count_sit << log_blocks_per_seg) !=
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3008) nat_blkaddr) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3009) f2fs_info(sbi, "Wrong SIT boundary, start(%u) end(%u) blocks(%u)",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3010) sit_blkaddr, nat_blkaddr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3011) segment_count_sit << log_blocks_per_seg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3012) return true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3013) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3014)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3015) if (nat_blkaddr + (segment_count_nat << log_blocks_per_seg) !=
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3016) ssa_blkaddr) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3017) f2fs_info(sbi, "Wrong NAT boundary, start(%u) end(%u) blocks(%u)",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3018) nat_blkaddr, ssa_blkaddr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3019) segment_count_nat << log_blocks_per_seg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3020) return true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3021) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3022)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3023) if (ssa_blkaddr + (segment_count_ssa << log_blocks_per_seg) !=
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3024) main_blkaddr) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3025) f2fs_info(sbi, "Wrong SSA boundary, start(%u) end(%u) blocks(%u)",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3026) ssa_blkaddr, main_blkaddr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3027) segment_count_ssa << log_blocks_per_seg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3028) return true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3029) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3030)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3031) if (main_end_blkaddr > seg_end_blkaddr) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3032) f2fs_info(sbi, "Wrong MAIN_AREA boundary, start(%u) end(%llu) block(%u)",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3033) main_blkaddr, seg_end_blkaddr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3034) segment_count_main << log_blocks_per_seg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3035) return true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3036) } else if (main_end_blkaddr < seg_end_blkaddr) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3037) int err = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3038) char *res;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3039)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3040) /* always fix the in-memory information */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3041) raw_super->segment_count = cpu_to_le32((main_end_blkaddr -
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3042) segment0_blkaddr) >> log_blocks_per_seg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3043)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3044) if (f2fs_readonly(sb) || bdev_read_only(sb->s_bdev)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3045) set_sbi_flag(sbi, SBI_NEED_SB_WRITE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3046) res = "internally";
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3047) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3048) err = __f2fs_commit_super(bh, NULL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3049) res = err ? "failed" : "done";
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3050) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3051) f2fs_info(sbi, "Fix alignment : %s, start(%u) end(%llu) block(%u)",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3052) res, main_blkaddr, seg_end_blkaddr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3053) segment_count_main << log_blocks_per_seg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3054) if (err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3055) return true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3056) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3057) return false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3058) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3059)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3060) static int sanity_check_raw_super(struct f2fs_sb_info *sbi,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3061) struct buffer_head *bh)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3062) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3063) block_t segment_count, segs_per_sec, secs_per_zone, segment_count_main;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3064) block_t total_sections, blocks_per_seg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3065) struct f2fs_super_block *raw_super = (struct f2fs_super_block *)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3066) (bh->b_data + F2FS_SUPER_OFFSET);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3067) size_t crc_offset = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3068) __u32 crc = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3069)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3070) if (le32_to_cpu(raw_super->magic) != F2FS_SUPER_MAGIC) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3071) f2fs_info(sbi, "Magic Mismatch, valid(0x%x) - read(0x%x)",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3072) F2FS_SUPER_MAGIC, le32_to_cpu(raw_super->magic));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3073) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3074) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3075)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3076) /* Check checksum_offset and crc in superblock */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3077) if (__F2FS_HAS_FEATURE(raw_super, F2FS_FEATURE_SB_CHKSUM)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3078) crc_offset = le32_to_cpu(raw_super->checksum_offset);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3079) if (crc_offset !=
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3080) offsetof(struct f2fs_super_block, crc)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3081) f2fs_info(sbi, "Invalid SB checksum offset: %zu",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3082) crc_offset);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3083) return -EFSCORRUPTED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3084) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3085) crc = le32_to_cpu(raw_super->crc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3086) if (!f2fs_crc_valid(sbi, crc, raw_super, crc_offset)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3087) f2fs_info(sbi, "Invalid SB checksum value: %u", crc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3088) return -EFSCORRUPTED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3089) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3090) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3091)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3092) /* Currently, only a 4KB block size is supported */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3093) if (le32_to_cpu(raw_super->log_blocksize) != F2FS_BLKSIZE_BITS) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3094) f2fs_info(sbi, "Invalid log_blocksize (%u), supports only %u",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3095) le32_to_cpu(raw_super->log_blocksize),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3096) F2FS_BLKSIZE_BITS);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3097) return -EFSCORRUPTED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3098) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3099)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3100) /* check log blocks per segment */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3101) if (le32_to_cpu(raw_super->log_blocks_per_seg) != 9) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3102) f2fs_info(sbi, "Invalid log blocks per segment (%u)",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3103) le32_to_cpu(raw_super->log_blocks_per_seg));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3104) return -EFSCORRUPTED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3105) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3106)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3107) /* Currently, 512/1024/2048/4096-byte sector sizes are supported */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3108) if (le32_to_cpu(raw_super->log_sectorsize) >
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3109) F2FS_MAX_LOG_SECTOR_SIZE ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3110) le32_to_cpu(raw_super->log_sectorsize) <
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3111) F2FS_MIN_LOG_SECTOR_SIZE) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3112) f2fs_info(sbi, "Invalid log sectorsize (%u)",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3113) le32_to_cpu(raw_super->log_sectorsize));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3114) return -EFSCORRUPTED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3115) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3116) if (le32_to_cpu(raw_super->log_sectors_per_block) +
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3117) le32_to_cpu(raw_super->log_sectorsize) !=
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3118) F2FS_MAX_LOG_SECTOR_SIZE) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3119) f2fs_info(sbi, "Invalid log sectors per block(%u) log sectorsize(%u)",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3120) le32_to_cpu(raw_super->log_sectors_per_block),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3121) le32_to_cpu(raw_super->log_sectorsize));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3122) return -EFSCORRUPTED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3123) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3124)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3125) segment_count = le32_to_cpu(raw_super->segment_count);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3126) segment_count_main = le32_to_cpu(raw_super->segment_count_main);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3127) segs_per_sec = le32_to_cpu(raw_super->segs_per_sec);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3128) secs_per_zone = le32_to_cpu(raw_super->secs_per_zone);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3129) total_sections = le32_to_cpu(raw_super->section_count);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3130)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3131) /* blocks_per_seg should be 512, given the above check */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3132) blocks_per_seg = 1 << le32_to_cpu(raw_super->log_blocks_per_seg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3133)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3134) if (segment_count > F2FS_MAX_SEGMENT ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3135) segment_count < F2FS_MIN_SEGMENTS) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3136) f2fs_info(sbi, "Invalid segment count (%u)", segment_count);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3137) return -EFSCORRUPTED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3138) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3139)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3140) if (total_sections > segment_count_main || total_sections < 1 ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3141) segs_per_sec > segment_count || !segs_per_sec) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3142) f2fs_info(sbi, "Invalid segment/section count (%u, %u x %u)",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3143) segment_count, total_sections, segs_per_sec);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3144) return -EFSCORRUPTED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3145) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3146)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3147) if (segment_count_main != total_sections * segs_per_sec) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3148) f2fs_info(sbi, "Invalid segment/section count (%u != %u * %u)",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3149) segment_count_main, total_sections, segs_per_sec);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3150) return -EFSCORRUPTED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3151) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3152)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3153) if ((segment_count / segs_per_sec) < total_sections) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3154) f2fs_info(sbi, "Small segment_count (%u < %u * %u)",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3155) segment_count, segs_per_sec, total_sections);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3156) return -EFSCORRUPTED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3157) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3158)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3159) if (segment_count > (le64_to_cpu(raw_super->block_count) >> 9)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3160) f2fs_info(sbi, "Wrong segment_count / block_count (%u > %llu)",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3161) segment_count, le64_to_cpu(raw_super->block_count));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3162) return -EFSCORRUPTED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3163) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3164)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3165) if (RDEV(0).path[0]) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3166) block_t dev_seg_count = le32_to_cpu(RDEV(0).total_segments);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3167) int i = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3168)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3169) while (i < MAX_DEVICES && RDEV(i).path[0]) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3170) dev_seg_count += le32_to_cpu(RDEV(i).total_segments);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3171) i++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3172) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3173) if (segment_count != dev_seg_count) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3174) f2fs_info(sbi, "Segment count (%u) mismatch with total segments from devices (%u)",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3175) segment_count, dev_seg_count);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3176) return -EFSCORRUPTED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3177) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3178) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3179) if (__F2FS_HAS_FEATURE(raw_super, F2FS_FEATURE_BLKZONED) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3180) !bdev_is_zoned(sbi->sb->s_bdev)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3181) f2fs_info(sbi, "Zoned block device path is missing");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3182) return -EFSCORRUPTED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3183) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3184) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3185)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3186) if (secs_per_zone > total_sections || !secs_per_zone) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3187) f2fs_info(sbi, "Wrong secs_per_zone / total_sections (%u, %u)",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3188) secs_per_zone, total_sections);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3189) return -EFSCORRUPTED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3190) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3191) if (le32_to_cpu(raw_super->extension_count) > F2FS_MAX_EXTENSION ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3192) raw_super->hot_ext_count > F2FS_MAX_EXTENSION ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3193) (le32_to_cpu(raw_super->extension_count) +
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3194) raw_super->hot_ext_count) > F2FS_MAX_EXTENSION) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3195) f2fs_info(sbi, "Corrupted extension count (%u + %u > %u)",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3196) le32_to_cpu(raw_super->extension_count),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3197) raw_super->hot_ext_count,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3198) F2FS_MAX_EXTENSION);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3199) return -EFSCORRUPTED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3200) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3201)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3202) if (le32_to_cpu(raw_super->cp_payload) >=
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3203) (blocks_per_seg - F2FS_CP_PACKS -
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3204) NR_CURSEG_PERSIST_TYPE)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3205) f2fs_info(sbi, "Insane cp_payload (%u >= %u)",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3206) le32_to_cpu(raw_super->cp_payload),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3207) blocks_per_seg - F2FS_CP_PACKS -
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3208) NR_CURSEG_PERSIST_TYPE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3209) return -EFSCORRUPTED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3210) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3211)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3212) /* check reserved ino info */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3213) if (le32_to_cpu(raw_super->node_ino) != 1 ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3214) le32_to_cpu(raw_super->meta_ino) != 2 ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3215) le32_to_cpu(raw_super->root_ino) != 3) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3216) f2fs_info(sbi, "Invalid Fs Meta Ino: node(%u) meta(%u) root(%u)",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3217) le32_to_cpu(raw_super->node_ino),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3218) le32_to_cpu(raw_super->meta_ino),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3219) le32_to_cpu(raw_super->root_ino));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3220) return -EFSCORRUPTED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3221) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3222)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3223) /* check CP/SIT/NAT/SSA/MAIN_AREA area boundary */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3224) if (sanity_check_area_boundary(sbi, bh))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3225) return -EFSCORRUPTED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3226)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3227) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3228) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3229)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3230) int f2fs_sanity_check_ckpt(struct f2fs_sb_info *sbi)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3231) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3232) unsigned int total, fsmeta;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3233) struct f2fs_super_block *raw_super = F2FS_RAW_SUPER(sbi);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3234) struct f2fs_checkpoint *ckpt = F2FS_CKPT(sbi);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3235) unsigned int ovp_segments, reserved_segments;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3236) unsigned int main_segs, blocks_per_seg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3237) unsigned int sit_segs, nat_segs;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3238) unsigned int sit_bitmap_size, nat_bitmap_size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3239) unsigned int log_blocks_per_seg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3240) unsigned int segment_count_main;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3241) unsigned int cp_pack_start_sum, cp_payload;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3242) block_t user_block_count, valid_user_blocks;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3243) block_t avail_node_count, valid_node_count;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3244) unsigned int nat_blocks, nat_bits_bytes, nat_bits_blocks;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3245) int i, j;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3246)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3247) total = le32_to_cpu(raw_super->segment_count);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3248) fsmeta = le32_to_cpu(raw_super->segment_count_ckpt);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3249) sit_segs = le32_to_cpu(raw_super->segment_count_sit);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3250) fsmeta += sit_segs;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3251) nat_segs = le32_to_cpu(raw_super->segment_count_nat);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3252) fsmeta += nat_segs;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3253) fsmeta += le32_to_cpu(ckpt->rsvd_segment_count);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3254) fsmeta += le32_to_cpu(raw_super->segment_count_ssa);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3255)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3256) if (unlikely(fsmeta >= total))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3257) return 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3258)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3259) ovp_segments = le32_to_cpu(ckpt->overprov_segment_count);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3260) reserved_segments = le32_to_cpu(ckpt->rsvd_segment_count);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3261)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3262) if (!f2fs_sb_has_readonly(sbi) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3263) unlikely(fsmeta < F2FS_MIN_META_SEGMENTS ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3264) ovp_segments == 0 || reserved_segments == 0)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3265) f2fs_err(sbi, "Wrong layout: check mkfs.f2fs version");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3266) return 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3267) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3268) user_block_count = le64_to_cpu(ckpt->user_block_count);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3269) segment_count_main = le32_to_cpu(raw_super->segment_count_main) +
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3270) (f2fs_sb_has_readonly(sbi) ? 1 : 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3271) log_blocks_per_seg = le32_to_cpu(raw_super->log_blocks_per_seg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3272) if (!user_block_count || user_block_count >=
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3273) segment_count_main << log_blocks_per_seg) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3274) f2fs_err(sbi, "Wrong user_block_count: %u",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3275) user_block_count);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3276) return 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3277) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3278)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3279) valid_user_blocks = le64_to_cpu(ckpt->valid_block_count);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3280) if (valid_user_blocks > user_block_count) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3281) f2fs_err(sbi, "Wrong valid_user_blocks: %u, user_block_count: %u",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3282) valid_user_blocks, user_block_count);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3283) return 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3284) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3285)
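^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3285) /* the first F2FS_RESERVED_NODE_NUM nids are reserved and never hold valid nodes */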
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3286) valid_node_count = le32_to_cpu(ckpt->valid_node_count);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3287) avail_node_count = sbi->total_node_count - F2FS_RESERVED_NODE_NUM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3288) if (valid_node_count > avail_node_count) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3289) f2fs_err(sbi, "Wrong valid_node_count: %u, avail_node_count: %u",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3290) valid_node_count, avail_node_count);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3291) return 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3292) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3293)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3294) main_segs = le32_to_cpu(raw_super->segment_count_main);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3295) blocks_per_seg = sbi->blocks_per_seg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3296)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3297) for (i = 0; i < NR_CURSEG_NODE_TYPE; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3298) if (le32_to_cpu(ckpt->cur_node_segno[i]) >= main_segs ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3299) le16_to_cpu(ckpt->cur_node_blkoff[i]) >= blocks_per_seg)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3300) return 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3301)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3302) if (f2fs_sb_has_readonly(sbi))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3303) goto check_data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3304)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3305) for (j = i + 1; j < NR_CURSEG_NODE_TYPE; j++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3306) if (le32_to_cpu(ckpt->cur_node_segno[i]) ==
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3307) le32_to_cpu(ckpt->cur_node_segno[j])) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3308) f2fs_err(sbi, "Node segments (%u, %u) have the same segno: %u",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3309) i, j,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3310) le32_to_cpu(ckpt->cur_node_segno[i]));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3311) return 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3312) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3313) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3314) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3315) check_data:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3316) for (i = 0; i < NR_CURSEG_DATA_TYPE; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3317) if (le32_to_cpu(ckpt->cur_data_segno[i]) >= main_segs ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3318) le16_to_cpu(ckpt->cur_data_blkoff[i]) >= blocks_per_seg)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3319) return 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3320)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3321) if (f2fs_sb_has_readonly(sbi))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3322) goto skip_cross;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3323)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3324) for (j = i + 1; j < NR_CURSEG_DATA_TYPE; j++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3325) if (le32_to_cpu(ckpt->cur_data_segno[i]) ==
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3326) le32_to_cpu(ckpt->cur_data_segno[j])) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3327) f2fs_err(sbi, "Data segments (%u, %u) have the same segno: %u",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3328) i, j,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3329) le32_to_cpu(ckpt->cur_data_segno[i]));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3330) return 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3331) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3332) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3333) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3334) for (i = 0; i < NR_CURSEG_NODE_TYPE; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3335) for (j = 0; j < NR_CURSEG_DATA_TYPE; j++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3336) if (le32_to_cpu(ckpt->cur_node_segno[i]) ==
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3337) le32_to_cpu(ckpt->cur_data_segno[j])) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3338) f2fs_err(sbi, "Node segment (%u) and Data segment (%u) have the same segno: %u",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3339) i, j,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3340) le32_to_cpu(ckpt->cur_node_segno[i]));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3341) return 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3342) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3343) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3344) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3345) skip_cross:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3346) sit_bitmap_size = le32_to_cpu(ckpt->sit_ver_bitmap_bytesize);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3347) nat_bitmap_size = le32_to_cpu(ckpt->nat_ver_bitmap_bytesize);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3348)
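^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3348) /* SIT/NAT store two copies of each block; the version bitmaps track one bit per block of a single copy */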
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3349) if (sit_bitmap_size != ((sit_segs / 2) << log_blocks_per_seg) / 8 ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3350) nat_bitmap_size != ((nat_segs / 2) << log_blocks_per_seg) / 8) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3351) f2fs_err(sbi, "Wrong bitmap size: sit: %u, nat:%u",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3352) sit_bitmap_size, nat_bitmap_size);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3353) return 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3354) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3355)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3356) cp_pack_start_sum = __start_sum_addr(sbi);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3357) cp_payload = __cp_payload(sbi);
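^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3357) /* summaries must start after the cp header and payload, and still leave room for NR_CURSEG_PERSIST_TYPE blocks plus the trailing cp block */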
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3358) if (cp_pack_start_sum < cp_payload + 1 ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3359) cp_pack_start_sum > blocks_per_seg - 1 -
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3360) NR_CURSEG_PERSIST_TYPE) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3361) f2fs_err(sbi, "Wrong cp_pack_start_sum: %u",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3362) cp_pack_start_sum);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3363) return 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3364) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3365)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3366) if (__is_set_ckpt_flags(ckpt, CP_LARGE_NAT_BITMAP_FLAG) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3367) le32_to_cpu(ckpt->checksum_offset) != CP_MIN_CHKSUM_OFFSET) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3368) f2fs_warn(sbi, "using deprecated layout of large_nat_bitmap, "
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3369) "please run fsck v1.13.0 or higher to repair, chksum_offset: %u, "
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3370) "fixed with patch: \"f2fs-tools: relocate chksum_offset for large_nat_bitmap feature\"",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3371) le32_to_cpu(ckpt->checksum_offset));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3372) return 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3373) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3374)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3375) nat_blocks = nat_segs << log_blocks_per_seg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3376) nat_bits_bytes = nat_blocks / BITS_PER_BYTE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3377) nat_bits_blocks = F2FS_BLK_ALIGN((nat_bits_bytes << 1) + 8);
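^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3377) /* nat_bits area: an 8-byte checkpoint version followed by the full and empty NAT bitmaps (nat_bits_bytes each) */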
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3378) if (__is_set_ckpt_flags(ckpt, CP_NAT_BITS_FLAG) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3379) (cp_payload + F2FS_CP_PACKS +
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3380) NR_CURSEG_PERSIST_TYPE + nat_bits_blocks >= blocks_per_seg)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3381) f2fs_warn(sbi, "Insane cp_payload: %u, nat_bits_blocks: %u",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3382) cp_payload, nat_bits_blocks);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3383) return 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3384) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3385)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3386) if (unlikely(f2fs_cp_error(sbi))) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3387) f2fs_err(sbi, "A bug case: need to run fsck");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3388) return 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3389) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3390) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3391) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3392)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3393) static void init_sb_info(struct f2fs_sb_info *sbi)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3394) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3395) struct f2fs_super_block *raw_super = sbi->raw_super;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3396) int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3397)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3398) sbi->log_sectors_per_block =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3399) le32_to_cpu(raw_super->log_sectors_per_block);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3400) sbi->log_blocksize = le32_to_cpu(raw_super->log_blocksize);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3401) sbi->blocksize = 1 << sbi->log_blocksize;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3402) sbi->log_blocks_per_seg = le32_to_cpu(raw_super->log_blocks_per_seg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3403) sbi->blocks_per_seg = 1 << sbi->log_blocks_per_seg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3404) sbi->segs_per_sec = le32_to_cpu(raw_super->segs_per_sec);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3405) sbi->secs_per_zone = le32_to_cpu(raw_super->secs_per_zone);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3406) sbi->total_sections = le32_to_cpu(raw_super->section_count);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3407) sbi->total_node_count =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3408) (le32_to_cpu(raw_super->segment_count_nat) / 2)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3409) * sbi->blocks_per_seg * NAT_ENTRY_PER_BLOCK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3410) F2FS_ROOT_INO(sbi) = le32_to_cpu(raw_super->root_ino);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3411) F2FS_NODE_INO(sbi) = le32_to_cpu(raw_super->node_ino);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3412) F2FS_META_INO(sbi) = le32_to_cpu(raw_super->meta_ino);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3413) sbi->cur_victim_sec = NULL_SECNO;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3414) sbi->next_victim_seg[BG_GC] = NULL_SEGNO;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3415) sbi->next_victim_seg[FG_GC] = NULL_SEGNO;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3416) sbi->max_victim_search = DEF_MAX_VICTIM_SEARCH;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3417) sbi->migration_granularity = sbi->segs_per_sec;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3418)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3419) sbi->dir_level = DEF_DIR_LEVEL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3420) sbi->interval_time[CP_TIME] = DEF_CP_INTERVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3421) sbi->interval_time[REQ_TIME] = DEF_IDLE_INTERVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3422) sbi->interval_time[DISCARD_TIME] = DEF_IDLE_INTERVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3423) sbi->interval_time[GC_TIME] = DEF_IDLE_INTERVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3424) sbi->interval_time[DISABLE_TIME] = DEF_DISABLE_INTERVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3425) sbi->interval_time[UMOUNT_DISCARD_TIMEOUT] =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3426) DEF_UMOUNT_DISCARD_TIMEOUT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3427) clear_sbi_flag(sbi, SBI_NEED_FSCK);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3428)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3429) for (i = 0; i < NR_COUNT_TYPE; i++)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3430) atomic_set(&sbi->nr_pages[i], 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3431)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3432) for (i = 0; i < META; i++)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3433) atomic_set(&sbi->wb_sync_req[i], 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3434)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3435) INIT_LIST_HEAD(&sbi->s_list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3436) mutex_init(&sbi->umount_mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3437) init_f2fs_rwsem(&sbi->io_order_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3438) spin_lock_init(&sbi->cp_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3439)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3440) sbi->dirty_device = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3441) spin_lock_init(&sbi->dev_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3442)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3443) init_f2fs_rwsem(&sbi->sb_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3444) init_f2fs_rwsem(&sbi->pin_sem);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3445) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3446)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3447) static int init_percpu_info(struct f2fs_sb_info *sbi)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3448) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3449) int err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3450)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3451) err = percpu_counter_init(&sbi->alloc_valid_block_count, 0, GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3452) if (err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3453) return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3454)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3455) err = percpu_counter_init(&sbi->total_valid_inode_count, 0,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3456) GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3457) if (err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3458) percpu_counter_destroy(&sbi->alloc_valid_block_count);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3459)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3460) return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3461) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3462)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3463) #ifdef CONFIG_BLK_DEV_ZONED
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3464)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3465) struct f2fs_report_zones_args {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3466) struct f2fs_dev_info *dev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3467) bool zone_cap_mismatch;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3468) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3469)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3470) static int f2fs_report_zone_cb(struct blk_zone *zone, unsigned int idx,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3471) void *data)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3472) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3473) struct f2fs_report_zones_args *rz_args = data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3474)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3475) if (zone->type == BLK_ZONE_TYPE_CONVENTIONAL)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3476) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3477)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3478) set_bit(idx, rz_args->dev->blkz_seq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3479) rz_args->dev->zone_capacity_blocks[idx] = zone->capacity >>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3480) F2FS_LOG_SECTORS_PER_BLOCK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3481) if (zone->len != zone->capacity && !rz_args->zone_cap_mismatch)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3482) rz_args->zone_cap_mismatch = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3483)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3484) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3485) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3486)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3487) static int init_blkz_info(struct f2fs_sb_info *sbi, int devi)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3488) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3489) struct block_device *bdev = FDEV(devi).bdev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3490) sector_t nr_sectors = bdev->bd_part->nr_sects;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3491) struct f2fs_report_zones_args rep_zone_arg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3492) int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3493)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3494) if (!f2fs_sb_has_blkzoned(sbi))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3495) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3496)
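^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3496) /* every zoned device must use the same zone size as the ones already added */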
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3497) if (sbi->blocks_per_blkz && sbi->blocks_per_blkz !=
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3498) SECTOR_TO_BLOCK(bdev_zone_sectors(bdev)))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3499) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3500) sbi->blocks_per_blkz = SECTOR_TO_BLOCK(bdev_zone_sectors(bdev));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3501) if (sbi->log_blocks_per_blkz && sbi->log_blocks_per_blkz !=
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3502) __ilog2_u32(sbi->blocks_per_blkz))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3503) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3504) sbi->log_blocks_per_blkz = __ilog2_u32(sbi->blocks_per_blkz);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3505) FDEV(devi).nr_blkz = SECTOR_TO_BLOCK(nr_sectors) >>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3506) sbi->log_blocks_per_blkz;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3507) if (nr_sectors & (bdev_zone_sectors(bdev) - 1))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3508) FDEV(devi).nr_blkz++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3509)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3510) FDEV(devi).blkz_seq = f2fs_kvzalloc(sbi,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3511) BITS_TO_LONGS(FDEV(devi).nr_blkz)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3512) * sizeof(unsigned long),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3513) GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3514) if (!FDEV(devi).blkz_seq)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3515) return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3516)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3517) /* Get block zone types and zone capacities */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3518) FDEV(devi).zone_capacity_blocks = f2fs_kzalloc(sbi,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3519) FDEV(devi).nr_blkz * sizeof(block_t),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3520) GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3521) if (!FDEV(devi).zone_capacity_blocks)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3522) return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3523)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3524) rep_zone_arg.dev = &FDEV(devi);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3525) rep_zone_arg.zone_cap_mismatch = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3526)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3527) ret = blkdev_report_zones(bdev, 0, BLK_ALL_ZONES, f2fs_report_zone_cb,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3528) &rep_zone_arg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3529) if (ret < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3530) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3531)
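^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3531) /* if every zone's capacity equals its size, the per-zone capacity table is not needed */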
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3532) if (!rep_zone_arg.zone_cap_mismatch) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3533) kfree(FDEV(devi).zone_capacity_blocks);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3534) FDEV(devi).zone_capacity_blocks = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3535) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3536)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3537) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3538) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3539) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3540)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3541) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3542) * Read the f2fs raw super block.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3543) * Since we keep two copies of the super block, read both of them
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3544) * to get the first valid one. If either copy is broken, we pass
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3545) * the recovery flag back to the caller.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3546) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3547) static int read_raw_super_block(struct f2fs_sb_info *sbi,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3548) struct f2fs_super_block **raw_super,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3549) int *valid_super_block, int *recovery)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3550) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3551) struct super_block *sb = sbi->sb;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3552) int block;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3553) struct buffer_head *bh;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3554) struct f2fs_super_block *super;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3555) int err = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3556)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3557) super = kzalloc(sizeof(struct f2fs_super_block), GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3558) if (!super)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3559) return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3560)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3561) for (block = 0; block < 2; block++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3562) bh = sb_bread(sb, block);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3563) if (!bh) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3564) f2fs_err(sbi, "Unable to read %dth superblock",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3565) block + 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3566) err = -EIO;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3567) *recovery = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3568) continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3569) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3570)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3571) /* sanity checking of raw super */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3572) err = sanity_check_raw_super(sbi, bh);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3573) if (err) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3574) f2fs_err(sbi, "Can't find valid F2FS filesystem in %dth superblock",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3575) block + 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3576) brelse(bh);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3577) *recovery = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3578) continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3579) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3580)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3581) if (!*raw_super) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3582) memcpy(super, bh->b_data + F2FS_SUPER_OFFSET,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3583) sizeof(*super));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3584) *valid_super_block = block;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3585) *raw_super = super;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3586) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3587) brelse(bh);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3588) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3589)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3590) /* No valid superblock */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3591) if (!*raw_super)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3592) kfree(super);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3593) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3594) err = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3595)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3596) return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3597) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3598)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3599) int f2fs_commit_super(struct f2fs_sb_info *sbi, bool recover)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3600) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3601) struct buffer_head *bh;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3602) __u32 crc = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3603) int err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3604)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3605) if ((recover && f2fs_readonly(sbi->sb)) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3606) bdev_read_only(sbi->sb->s_bdev)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3607) set_sbi_flag(sbi, SBI_NEED_SB_WRITE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3608) return -EROFS;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3609) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3610)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3611) /* update the superblock checksum before writing it out */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3612) if (!recover && f2fs_sb_has_sb_chksum(sbi)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3613) crc = f2fs_crc32(sbi, F2FS_RAW_SUPER(sbi),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3614) offsetof(struct f2fs_super_block, crc));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3615) F2FS_RAW_SUPER(sbi)->crc = cpu_to_le32(crc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3616) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3617)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3618) /* write the backup superblock first */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3619) bh = sb_bread(sbi->sb, sbi->valid_super_block ? 0 : 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3620) if (!bh)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3621) return -EIO;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3622) err = __f2fs_commit_super(bh, F2FS_RAW_SUPER(sbi));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3623) brelse(bh);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3624)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3625) /* if we are in recovery path, skip writing valid superblock */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3626) if (recover || err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3627) return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3628)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3629) /* write current valid superblock */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3630) bh = sb_bread(sbi->sb, sbi->valid_super_block);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3631) if (!bh)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3632) return -EIO;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3633) err = __f2fs_commit_super(bh, F2FS_RAW_SUPER(sbi));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3634) brelse(bh);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3635) return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3636) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3637)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3638) static int f2fs_scan_devices(struct f2fs_sb_info *sbi)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3639) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3640) struct f2fs_super_block *raw_super = F2FS_RAW_SUPER(sbi);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3641) unsigned int max_devices = MAX_DEVICES;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3642) int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3643)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3644) /* Initialize single device information */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3645) if (!RDEV(0).path[0]) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3646) if (!bdev_is_zoned(sbi->sb->s_bdev))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3647) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3648) max_devices = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3649) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3650)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3651) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3652) * Initialize information for multiple devices, or for a
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3653) * single zoned block device.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3654) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3655) sbi->devs = f2fs_kzalloc(sbi,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3656) array_size(max_devices,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3657) sizeof(struct f2fs_dev_info)),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3658) GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3659) if (!sbi->devs)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3660) return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3661)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3662) for (i = 0; i < max_devices; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3663)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3664) if (i > 0 && !RDEV(i).path[0])
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3665) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3666)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3667) if (max_devices == 1) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3668) /* Single zoned block device mount */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3669) FDEV(0).bdev =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3670) blkdev_get_by_dev(sbi->sb->s_bdev->bd_dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3671) sbi->sb->s_mode, sbi->sb->s_type);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3672) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3673) /* Multi-device mount */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3674) memcpy(FDEV(i).path, RDEV(i).path, MAX_PATH_LEN);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3675) FDEV(i).total_segments =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3676) le32_to_cpu(RDEV(i).total_segments);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3677) if (i == 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3678) FDEV(i).start_blk = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3679) FDEV(i).end_blk = FDEV(i).start_blk +
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3680) (FDEV(i).total_segments <<
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3681) sbi->log_blocks_per_seg) - 1 +
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3682) le32_to_cpu(raw_super->segment0_blkaddr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3683) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3684) FDEV(i).start_blk = FDEV(i - 1).end_blk + 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3685) FDEV(i).end_blk = FDEV(i).start_blk +
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3686) (FDEV(i).total_segments <<
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3687) sbi->log_blocks_per_seg) - 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3688) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3689) FDEV(i).bdev = blkdev_get_by_path(FDEV(i).path,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3690) sbi->sb->s_mode, sbi->sb->s_type);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3691) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3692) if (IS_ERR(FDEV(i).bdev))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3693) return PTR_ERR(FDEV(i).bdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3694)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3695) /* record the number of opened devices so the error path can release them */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3696) sbi->s_ndevs = i + 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3697)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3698) #ifdef CONFIG_BLK_DEV_ZONED
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3699) if (bdev_zoned_model(FDEV(i).bdev) == BLK_ZONED_HM &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3700) !f2fs_sb_has_blkzoned(sbi)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3701) f2fs_err(sbi, "Zoned block device feature not enabled");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3702) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3703) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3704) if (bdev_zoned_model(FDEV(i).bdev) != BLK_ZONED_NONE) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3705) if (init_blkz_info(sbi, i)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3706) f2fs_err(sbi, "Failed to initialize F2FS blkzone information");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3707) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3708) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3709) if (max_devices == 1)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3710) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3711) f2fs_info(sbi, "Mount Device [%2d]: %20s, %8u, %8x - %8x (zone: %s)",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3712) i, FDEV(i).path,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3713) FDEV(i).total_segments,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3714) FDEV(i).start_blk, FDEV(i).end_blk,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3715) bdev_zoned_model(FDEV(i).bdev) == BLK_ZONED_HA ?
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3716) "Host-aware" : "Host-managed");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3717) continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3718) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3719) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3720) f2fs_info(sbi, "Mount Device [%2d]: %20s, %8u, %8x - %8x",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3721) i, FDEV(i).path,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3722) FDEV(i).total_segments,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3723) FDEV(i).start_blk, FDEV(i).end_blk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3724) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3725) f2fs_info(sbi,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3726) "IO Block Size: %8d KB", F2FS_IO_SIZE_KB(sbi));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3727) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3728) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3729)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3730) static int f2fs_setup_casefold(struct f2fs_sb_info *sbi)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3731) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3732) #ifdef CONFIG_UNICODE
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3733) if (f2fs_sb_has_casefold(sbi) && !sbi->sb->s_encoding) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3734) const struct f2fs_sb_encodings *encoding_info;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3735) struct unicode_map *encoding;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3736) __u16 encoding_flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3737)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3738) if (f2fs_sb_read_encoding(sbi->raw_super, &encoding_info,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3739) &encoding_flags)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3740) f2fs_err(sbi,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3741) "Encoding requested by superblock is unknown");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3742) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3743) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3744)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3745) encoding = utf8_load(encoding_info->version);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3746) if (IS_ERR(encoding)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3747) f2fs_err(sbi,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3748) "can't mount with superblock charset: %s-%s "
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3749) "not supported by the kernel. flags: 0x%x.",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3750) encoding_info->name, encoding_info->version,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3751) encoding_flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3752) return PTR_ERR(encoding);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3753) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3754) f2fs_info(sbi, "Using encoding defined by superblock: "
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3755) "%s-%s with flags 0x%hx", encoding_info->name,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3756) encoding_info->version?:"\b", encoding_flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3757)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3758) sbi->sb->s_encoding = encoding;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3759) sbi->sb->s_encoding_flags = encoding_flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3760) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3761) #else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3762) if (f2fs_sb_has_casefold(sbi)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3763) f2fs_err(sbi, "Filesystem with casefold feature cannot be mounted without CONFIG_UNICODE");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3764) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3765) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3766) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3767) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3768) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3769)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3770) static void f2fs_tuning_parameters(struct f2fs_sb_info *sbi)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3771) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3772) struct f2fs_sm_info *sm_i = SM_I(sbi);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3773)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3774) /* adjust parameters according to the volume size */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3775) if (sm_i->main_segments <= SMALL_VOLUME_SEGMENTS) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3776) F2FS_OPTION(sbi).alloc_mode = ALLOC_MODE_REUSE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3777) sm_i->dcc_info->discard_granularity = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3778) sm_i->ipu_policy = 1 << F2FS_IPU_FORCE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3779) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3780)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3781) sbi->readdir_ra = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3782) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3783)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3784) static int f2fs_fill_super(struct super_block *sb, void *data, int silent)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3785) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3786) struct f2fs_sb_info *sbi;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3787) struct f2fs_super_block *raw_super;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3788) struct inode *root;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3789) int err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3790) bool skip_recovery = false, need_fsck = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3791) char *options = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3792) int recovery, i, valid_super_block;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3793) struct curseg_info *seg_i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3794) int retry_cnt = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3795)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3796) try_onemore:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3797) err = -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3798) raw_super = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3799) valid_super_block = -1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3800) recovery = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3801)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3802) /* allocate memory for f2fs-specific super block info */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3803) sbi = kzalloc(sizeof(struct f2fs_sb_info), GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3804) if (!sbi)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3805) return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3806)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3807) sbi->sb = sb;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3808)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3809) /* Load the checksum driver */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3810) sbi->s_chksum_driver = crypto_alloc_shash("crc32", 0, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3811) if (IS_ERR(sbi->s_chksum_driver)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3812) f2fs_err(sbi, "Cannot load crc32 driver.");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3813) err = PTR_ERR(sbi->s_chksum_driver);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3814) sbi->s_chksum_driver = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3815) goto free_sbi;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3816) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3817)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3818) /* set a block size */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3819) if (unlikely(!sb_set_blocksize(sb, F2FS_BLKSIZE))) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3820) f2fs_err(sbi, "unable to set blocksize");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3821) goto free_sbi;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3822) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3823)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3824) err = read_raw_super_block(sbi, &raw_super, &valid_super_block,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3825) &recovery);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3826) if (err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3827) goto free_sbi;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3828)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3829) sb->s_fs_info = sbi;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3830) sbi->raw_super = raw_super;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3831)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3832) /* precompute checksum seed for metadata */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3833) if (f2fs_sb_has_inode_chksum(sbi))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3834) sbi->s_chksum_seed = f2fs_chksum(sbi, ~0, raw_super->uuid,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3835) sizeof(raw_super->uuid));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3836)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3837) default_options(sbi);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3838) /* parse mount options */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3839) options = kstrdup((const char *)data, GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3840) if (data && !options) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3841) err = -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3842) goto free_sb_buf;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3843) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3844)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3845) err = parse_options(sb, options, false);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3846) if (err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3847) goto free_options;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3848)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3849) sb->s_maxbytes = max_file_blocks(NULL) <<
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3850) le32_to_cpu(raw_super->log_blocksize);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3851) sb->s_max_links = F2FS_LINK_MAX;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3852)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3853) err = f2fs_setup_casefold(sbi);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3854) if (err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3855) goto free_options;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3856)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3857) #ifdef CONFIG_QUOTA
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3858) sb->dq_op = &f2fs_quota_operations;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3859) sb->s_qcop = &f2fs_quotactl_ops;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3860) sb->s_quota_types = QTYPE_MASK_USR | QTYPE_MASK_GRP | QTYPE_MASK_PRJ;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3861)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3862) if (f2fs_sb_has_quota_ino(sbi)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3863) for (i = 0; i < MAXQUOTAS; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3864) if (f2fs_qf_ino(sbi->sb, i))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3865) sbi->nquota_files++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3866) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3867) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3868) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3869)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3870) sb->s_op = &f2fs_sops;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3871) #ifdef CONFIG_FS_ENCRYPTION
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3872) sb->s_cop = &f2fs_cryptops;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3873) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3874) #ifdef CONFIG_FS_VERITY
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3875) sb->s_vop = &f2fs_verityops;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3876) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3877) sb->s_xattr = f2fs_xattr_handlers;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3878) sb->s_export_op = &f2fs_export_ops;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3879) sb->s_magic = F2FS_SUPER_MAGIC;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3880) sb->s_time_gran = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3881) sb->s_flags = (sb->s_flags & ~SB_POSIXACL) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3882) (test_opt(sbi, POSIX_ACL) ? SB_POSIXACL : 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3883) memcpy(&sb->s_uuid, raw_super->uuid, sizeof(raw_super->uuid));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3884) sb->s_iflags |= SB_I_CGROUPWB;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3885)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3886) /* init f2fs-specific super block info */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3887) sbi->valid_super_block = valid_super_block;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3888) init_f2fs_rwsem(&sbi->gc_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3889) mutex_init(&sbi->writepages);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3890) init_f2fs_rwsem(&sbi->cp_global_sem);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3891) init_f2fs_rwsem(&sbi->node_write);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3892) init_f2fs_rwsem(&sbi->node_change);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3893)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3894) /* disallow all the data/node/meta page writes */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3895) set_sbi_flag(sbi, SBI_POR_DOING);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3896) spin_lock_init(&sbi->stat_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3897)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3898) /* init iostat info */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3899) spin_lock_init(&sbi->iostat_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3900) sbi->iostat_enable = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3901) sbi->iostat_period_ms = DEFAULT_IOSTAT_PERIOD_MS;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3902)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3903) for (i = 0; i < NR_PAGE_TYPE; i++) {
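^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3903) /* META uses a single bio list; DATA and NODE get one per temperature (hot/warm/cold) */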
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3904) int n = (i == META) ? 1 : NR_TEMP_TYPE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3905) int j;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3906)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3907) sbi->write_io[i] =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3908) f2fs_kmalloc(sbi,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3909) array_size(n,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3910) sizeof(struct f2fs_bio_info)),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3911) GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3912) if (!sbi->write_io[i]) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3913) err = -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3914) goto free_bio_info;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3915) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3916)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3917) for (j = HOT; j < n; j++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3918) init_f2fs_rwsem(&sbi->write_io[i][j].io_rwsem);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3919) sbi->write_io[i][j].sbi = sbi;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3920) sbi->write_io[i][j].bio = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3921) spin_lock_init(&sbi->write_io[i][j].io_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3922) INIT_LIST_HEAD(&sbi->write_io[i][j].io_list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3923) INIT_LIST_HEAD(&sbi->write_io[i][j].bio_list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3924) init_f2fs_rwsem(&sbi->write_io[i][j].bio_list_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3925) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3926) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3927)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3928) init_f2fs_rwsem(&sbi->cp_rwsem);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3929) init_f2fs_rwsem(&sbi->quota_sem);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3930) init_waitqueue_head(&sbi->cp_wait);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3931) init_sb_info(sbi);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3932)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3933) err = init_percpu_info(sbi);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3934) if (err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3935) goto free_bio_info;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3936)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3937) if (F2FS_IO_ALIGNED(sbi)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3938) sbi->write_io_dummy =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3939) mempool_create_page_pool(2 * (F2FS_IO_SIZE(sbi) - 1), 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3940) if (!sbi->write_io_dummy) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3941) err = -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3942) goto free_percpu;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3943) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3944) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3945)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3946) /* init per sbi slab cache */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3947) err = f2fs_init_xattr_caches(sbi);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3948) if (err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3949) goto free_io_dummy;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3950) err = f2fs_init_page_array_cache(sbi);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3951) if (err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3952) goto free_xattr_cache;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3953)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3954) /* get an inode for meta space */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3955) sbi->meta_inode = f2fs_iget(sb, F2FS_META_INO(sbi));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3956) if (IS_ERR(sbi->meta_inode)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3957) f2fs_err(sbi, "Failed to read F2FS meta data inode");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3958) err = PTR_ERR(sbi->meta_inode);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3959) goto free_page_array_cache;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3960) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3961)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3962) err = f2fs_get_valid_checkpoint(sbi);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3963) if (err) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3964) f2fs_err(sbi, "Failed to get valid F2FS checkpoint");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3965) goto free_meta_inode;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3966) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3967)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3968) if (__is_set_ckpt_flags(F2FS_CKPT(sbi), CP_QUOTA_NEED_FSCK_FLAG))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3969) set_sbi_flag(sbi, SBI_QUOTA_NEED_REPAIR);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3970) if (__is_set_ckpt_flags(F2FS_CKPT(sbi), CP_DISABLED_QUICK_FLAG)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3971) set_sbi_flag(sbi, SBI_CP_DISABLED_QUICK);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3972) sbi->interval_time[DISABLE_TIME] = DEF_DISABLE_QUICK_INTERVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3973) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3974)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3975) if (__is_set_ckpt_flags(F2FS_CKPT(sbi), CP_FSCK_FLAG))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3976) set_sbi_flag(sbi, SBI_NEED_FSCK);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3977)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3978) /* Initialize device list */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3979) err = f2fs_scan_devices(sbi);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3980) if (err) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3981) f2fs_err(sbi, "Failed to find devices");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3982) goto free_devices;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3983) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3984)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3985) err = f2fs_init_post_read_wq(sbi);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3986) if (err) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3987) f2fs_err(sbi, "Failed to initialize post read workqueue");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3988) goto free_devices;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3989) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3990)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3991) sbi->total_valid_node_count =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3992) le32_to_cpu(sbi->ckpt->valid_node_count);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3993) percpu_counter_set(&sbi->total_valid_inode_count,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3994) le32_to_cpu(sbi->ckpt->valid_inode_count));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3995) sbi->user_block_count = le64_to_cpu(sbi->ckpt->user_block_count);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3996) sbi->total_valid_block_count =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3997) le64_to_cpu(sbi->ckpt->valid_block_count);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3998) sbi->last_valid_block_count = sbi->total_valid_block_count;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3999) sbi->reserved_blocks = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4000) sbi->current_reserved_blocks = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4001) limit_reserve_root(sbi);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4002) adjust_unusable_cap_perc(sbi);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4003)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4004) for (i = 0; i < NR_INODE_TYPE; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4005) INIT_LIST_HEAD(&sbi->inode_list[i]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4006) spin_lock_init(&sbi->inode_lock[i]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4007) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4008) mutex_init(&sbi->flush_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4009)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4010) f2fs_init_extent_cache_info(sbi);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4011)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4012) f2fs_init_ino_entry_info(sbi);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4013)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4014) f2fs_init_fsync_node_info(sbi);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4015)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4016) /* setup checkpoint request control and start checkpoint issue thread */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4017) f2fs_init_ckpt_req_control(sbi);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4018) if (!f2fs_readonly(sb) && !test_opt(sbi, DISABLE_CHECKPOINT) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4019) test_opt(sbi, MERGE_CHECKPOINT)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4020) err = f2fs_start_ckpt_thread(sbi);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4021) if (err) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4022) f2fs_err(sbi,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4023) "Failed to start F2FS issue_checkpoint_thread (%d)",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4024) err);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4025) goto stop_ckpt_thread;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4026) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4027) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4028)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4029) /* setup f2fs internal modules */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4030) err = f2fs_build_segment_manager(sbi);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4031) if (err) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4032) f2fs_err(sbi, "Failed to initialize F2FS segment manager (%d)",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4033) err);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4034) goto free_sm;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4035) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4036) err = f2fs_build_node_manager(sbi);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4037) if (err) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4038) f2fs_err(sbi, "Failed to initialize F2FS node manager (%d)",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4039) err);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4040) goto free_nm;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4041) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4042)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4043) err = adjust_reserved_segment(sbi);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4044) if (err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4045) goto free_nm;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4046)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4047) /* For write statistics */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4048) sbi->sectors_written_start = f2fs_get_sectors_written(sbi);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4049)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4050) /* Read accumulated write IO statistics if they exist */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4051) seg_i = CURSEG_I(sbi, CURSEG_HOT_NODE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4052) if (__exist_node_summaries(sbi))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4053) sbi->kbytes_written =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4054) le64_to_cpu(seg_i->journal->info.kbytes_written);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4055)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4056) f2fs_build_gc_manager(sbi);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4057)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4058) atomic_set(&sbi->no_cp_fsync_pages, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4059)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4060) err = f2fs_build_stats(sbi);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4061) if (err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4062) goto free_nm;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4063)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4064) /* get an inode for node space */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4065) sbi->node_inode = f2fs_iget(sb, F2FS_NODE_INO(sbi));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4066) if (IS_ERR(sbi->node_inode)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4067) f2fs_err(sbi, "Failed to read node inode");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4068) err = PTR_ERR(sbi->node_inode);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4069) goto free_stats;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4070) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4071)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4072) /* read root inode and dentry */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4073) root = f2fs_iget(sb, F2FS_ROOT_INO(sbi));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4074) if (IS_ERR(root)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4075) f2fs_err(sbi, "Failed to read root inode");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4076) err = PTR_ERR(root);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4077) goto free_node_inode;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4078) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4079) if (!S_ISDIR(root->i_mode) || !root->i_blocks ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4080) !root->i_size || !root->i_nlink) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4081) iput(root);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4082) err = -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4083) goto free_node_inode;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4084) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4085)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4086) sb->s_root = d_make_root(root); /* allocate root dentry */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4087) if (!sb->s_root) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4088) err = -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4089) goto free_node_inode;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4090) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4091)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4092) err = f2fs_init_compress_inode(sbi);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4093) if (err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4094) goto free_root_inode;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4095)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4096) err = f2fs_register_sysfs(sbi);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4097) if (err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4098) goto free_compress_inode;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4099)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4100) #ifdef CONFIG_QUOTA
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4101) /* Enable quota usage during mount */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4102) if (f2fs_sb_has_quota_ino(sbi) && !f2fs_readonly(sb)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4103) err = f2fs_enable_quotas(sb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4104) if (err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4105) f2fs_err(sbi, "Cannot turn on quotas: error %d", err);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4106) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4107) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4108) /* if there are any orphan inodes, free them */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4109) err = f2fs_recover_orphan_inodes(sbi);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4110) if (err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4111) goto free_meta;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4112)
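	/*
	 * If the checkpoint indicates that checkpointing was disabled,
	 * skip roll-forward recovery entirely.
	 */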
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4113) if (unlikely(is_set_ckpt_flags(sbi, CP_DISABLED_FLAG)))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4114) goto reset_checkpoint;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4115)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4116) /* recover fsynced data */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4117) if (!test_opt(sbi, DISABLE_ROLL_FORWARD) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4118) !test_opt(sbi, NORECOVERY)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4119) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4120) * The mount should fail when the device is read-only and the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4121) * previous checkpoint was not done by a clean system shutdown.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4122) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4123) if (f2fs_hw_is_readonly(sbi)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4124) if (!is_set_ckpt_flags(sbi, CP_UMOUNT_FLAG)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4125) err = f2fs_recover_fsync_data(sbi, true);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4126) if (err > 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4127) err = -EROFS;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4128) f2fs_err(sbi, "Need to recover fsync data, but "
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4129) "write access unavailable, please try "
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4130) "mount w/ disable_roll_forward or norecovery");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4131) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4132) if (err < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4133) goto free_meta;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4134) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4135) f2fs_info(sbi, "write access unavailable, skipping recovery");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4136) goto reset_checkpoint;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4137) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4138)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4139) if (need_fsck)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4140) set_sbi_flag(sbi, SBI_NEED_FSCK);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4141)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4142) if (skip_recovery)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4143) goto reset_checkpoint;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4144)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4145) err = f2fs_recover_fsync_data(sbi, false);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4146) if (err < 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4147) if (err != -ENOMEM)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4148) skip_recovery = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4149) need_fsck = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4150) f2fs_err(sbi, "Cannot recover all fsync data errno=%d",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4151) err);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4152) goto free_meta;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4153) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4154) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4155) err = f2fs_recover_fsync_data(sbi, true);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4156)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4157) if (!f2fs_readonly(sb) && err > 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4158) err = -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4159) f2fs_err(sbi, "Need to recover fsync data");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4160) goto free_meta;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4161) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4162) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4163)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4164) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4165) * If f2fs is not read-only and fsync data recovery succeeds,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4166) * check the zoned block devices' write pointer consistency.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4167) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4168) if (!err && !f2fs_readonly(sb) && f2fs_sb_has_blkzoned(sbi)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4169) err = f2fs_check_write_pointer(sbi);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4170) if (err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4171) goto free_meta;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4172) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4173)
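/*
 * The normal mount path and the recovery-skip paths above converge here.
 */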
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4174) reset_checkpoint:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4175) f2fs_init_inmem_curseg(sbi);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4176)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4177) /* f2fs_recover_fsync_data() cleared this already */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4178) clear_sbi_flag(sbi, SBI_POR_DOING);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4179)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4180) if (test_opt(sbi, DISABLE_CHECKPOINT)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4181) err = f2fs_disable_checkpoint(sbi);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4182) if (err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4183) goto sync_free_meta;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4184) } else if (is_set_ckpt_flags(sbi, CP_DISABLED_FLAG)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4185) f2fs_enable_checkpoint(sbi);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4186) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4187)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4188) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4189) * If the filesystem is not mounted read-only, then start
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4190) * the gc_thread.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4191) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4192) if ((F2FS_OPTION(sbi).bggc_mode != BGGC_MODE_OFF ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4193) test_opt(sbi, GC_MERGE)) && !f2fs_readonly(sb)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4194) /* After POR, we can run the background GC thread. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4195) err = f2fs_start_gc_thread(sbi);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4196) if (err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4197) goto sync_free_meta;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4198) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4199) kvfree(options);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4200)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4201) /* recover broken superblock */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4202) if (recovery) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4203) err = f2fs_commit_super(sbi, true);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4204) f2fs_info(sbi, "Try to recover %dth superblock, ret: %d",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4205) sbi->valid_super_block ? 1 : 2, err);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4206) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4207)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4208) f2fs_join_shrinker(sbi);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4209)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4210) f2fs_tuning_parameters(sbi);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4211)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4212) f2fs_notice(sbi, "Mounted with checkpoint version = %llx",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4213) cur_cp_version(F2FS_CKPT(sbi)));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4214) f2fs_update_time(sbi, CP_TIME);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4215) f2fs_update_time(sbi, REQ_TIME);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4216) clear_sbi_flag(sbi, SBI_CP_DISABLED_QUICK);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4217) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4218)
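/*
 * Error unwinding: each label below releases what was set up before the
 * corresponding failure point, in reverse order of initialization.
 */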
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4219) sync_free_meta:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4220) /* safe to flush all the data */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4221) sync_filesystem(sbi->sb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4222) retry_cnt = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4223)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4224) free_meta:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4225) #ifdef CONFIG_QUOTA
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4226) f2fs_truncate_quota_inode_pages(sb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4227) if (f2fs_sb_has_quota_ino(sbi) && !f2fs_readonly(sb))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4228) f2fs_quota_off_umount(sbi->sb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4229) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4230) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4231) * Some dirty meta pages can be produced when f2fs_recover_orphan_inodes()
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4232) * fails with EIO. Then iput(node_inode) can trigger balance_fs_bg()
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4233) * followed by f2fs_write_checkpoint() through f2fs_write_node_pages(),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4234) * which falls into an infinite loop in f2fs_sync_meta_pages().
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4235) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4236) truncate_inode_pages_final(META_MAPPING(sbi));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4237) /* evict some inodes being cached by GC */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4238) evict_inodes(sb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4239) f2fs_unregister_sysfs(sbi);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4240) free_compress_inode:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4241) f2fs_destroy_compress_inode(sbi);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4242) free_root_inode:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4243) dput(sb->s_root);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4244) sb->s_root = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4245) free_node_inode:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4246) f2fs_release_ino_entry(sbi, true);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4247) truncate_inode_pages_final(NODE_MAPPING(sbi));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4248) iput(sbi->node_inode);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4249) sbi->node_inode = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4250) free_stats:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4251) f2fs_destroy_stats(sbi);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4252) free_nm:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4253) /* stop discard thread before destroying node manager */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4254) f2fs_stop_discard_thread(sbi);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4255) f2fs_destroy_node_manager(sbi);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4256) free_sm:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4257) f2fs_destroy_segment_manager(sbi);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4258) f2fs_destroy_post_read_wq(sbi);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4259) stop_ckpt_thread:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4260) f2fs_stop_ckpt_thread(sbi);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4261) free_devices:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4262) destroy_device_list(sbi);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4263) kvfree(sbi->ckpt);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4264) free_meta_inode:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4265) make_bad_inode(sbi->meta_inode);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4266) iput(sbi->meta_inode);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4267) sbi->meta_inode = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4268) free_page_array_cache:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4269) f2fs_destroy_page_array_cache(sbi);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4270) free_xattr_cache:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4271) f2fs_destroy_xattr_caches(sbi);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4272) free_io_dummy:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4273) mempool_destroy(sbi->write_io_dummy);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4274) free_percpu:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4275) destroy_percpu_info(sbi);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4276) free_bio_info:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4277) for (i = 0; i < NR_PAGE_TYPE; i++)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4278) kvfree(sbi->write_io[i]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4279)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4280) #ifdef CONFIG_UNICODE
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4281) utf8_unload(sb->s_encoding);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4282) sb->s_encoding = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4283) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4284) free_options:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4285) #ifdef CONFIG_QUOTA
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4286) for (i = 0; i < MAXQUOTAS; i++)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4287) kfree(F2FS_OPTION(sbi).s_qf_names[i]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4288) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4289) fscrypt_free_dummy_policy(&F2FS_OPTION(sbi).dummy_enc_policy);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4290) kvfree(options);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4291) free_sb_buf:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4292) kfree(raw_super);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4293) free_sbi:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4294) if (sbi->s_chksum_driver)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4295) crypto_free_shash(sbi->s_chksum_driver);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4296) kfree(sbi);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4297)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4298) /* give only one more chance */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4299) if (retry_cnt > 0 && skip_recovery) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4300) retry_cnt--;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4301) shrink_dcache_sb(sb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4302) goto try_onemore;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4303) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4304) return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4305) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4306)
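/*
 * Standard block-device mount entry point; defers superblock setup to
 * f2fs_fill_super() via mount_bdev().
 */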
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4307) static struct dentry *f2fs_mount(struct file_system_type *fs_type, int flags,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4308) const char *dev_name, void *data)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4309) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4310) return mount_bdev(fs_type, flags, dev_name, data, f2fs_fill_super);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4311) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4312)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4313) static void kill_f2fs_super(struct super_block *sb)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4314) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4315) if (sb->s_root) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4316) struct f2fs_sb_info *sbi = F2FS_SB(sb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4317)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4318) set_sbi_flag(sbi, SBI_IS_CLOSE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4319) f2fs_stop_gc_thread(sbi);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4320) f2fs_stop_discard_thread(sbi);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4321)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4322) #ifdef CONFIG_F2FS_FS_COMPRESSION
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4323) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4324) * a later evict_inode() can bypass checking and invalidating
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4325) * the compress inode cache.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4326) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4327) if (test_opt(sbi, COMPRESS_CACHE))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4328) truncate_inode_pages_final(COMPRESS_MAPPING(sbi));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4329) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4330)
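		/*
		 * Write a final umount checkpoint if the filesystem is dirty
		 * or the last checkpoint was not marked as a clean umount.
		 */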
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4331) if (is_sbi_flag_set(sbi, SBI_IS_DIRTY) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4332) !is_set_ckpt_flags(sbi, CP_UMOUNT_FLAG)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4333) struct cp_control cpc = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4334) .reason = CP_UMOUNT,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4335) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4336) f2fs_write_checkpoint(sbi, &cpc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4337) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4338)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4339) if (is_sbi_flag_set(sbi, SBI_IS_RECOVERED) && f2fs_readonly(sb))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4340) sb->s_flags &= ~SB_RDONLY;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4341) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4342) kill_block_super(sb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4343) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4344)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4345) static struct file_system_type f2fs_fs_type = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4346) .owner = THIS_MODULE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4347) .name = "f2fs",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4348) .mount = f2fs_mount,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4349) .kill_sb = kill_f2fs_super,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4350) .fs_flags = FS_REQUIRES_DEV,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4351) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4352) MODULE_ALIAS_FS("f2fs");
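/*
 * Illustrative usage (not part of the code): once this module is loaded,
 * a formatted device can be mounted with e.g.
 *	mount -t f2fs /dev/sdX /mnt
 */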
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4353)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4354) static int __init init_inodecache(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4355) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4356) f2fs_inode_cachep = kmem_cache_create("f2fs_inode_cache",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4357) sizeof(struct f2fs_inode_info), 0,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4358) SLAB_RECLAIM_ACCOUNT|SLAB_ACCOUNT, NULL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4359) if (!f2fs_inode_cachep)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4360) return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4361) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4362) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4363)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4364) static void destroy_inodecache(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4365) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4366) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4367) * Make sure all delayed RCU-freed inodes are flushed before we
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4368) * destroy the cache.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4369) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4370) rcu_barrier();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4371) kmem_cache_destroy(f2fs_inode_cachep);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4372) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4373)
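/*
 * Module init: set up the global caches and register sysfs, the shrinker
 * and the filesystem type; any failure unwinds in reverse order.
 */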
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4374) static int __init init_f2fs_fs(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4375) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4376) int err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4377)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4378) if (PAGE_SIZE != F2FS_BLKSIZE) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4379) printk(KERN_ERR "F2FS not supported on PAGE_SIZE(%lu) != %d\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4380) PAGE_SIZE, F2FS_BLKSIZE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4381) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4382) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4383)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4384) err = init_inodecache();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4385) if (err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4386) goto fail;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4387) err = f2fs_create_node_manager_caches();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4388) if (err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4389) goto free_inodecache;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4390) err = f2fs_create_segment_manager_caches();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4391) if (err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4392) goto free_node_manager_caches;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4393) err = f2fs_create_checkpoint_caches();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4394) if (err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4395) goto free_segment_manager_caches;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4396) err = f2fs_create_recovery_cache();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4397) if (err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4398) goto free_checkpoint_caches;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4399) err = f2fs_create_extent_cache();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4400) if (err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4401) goto free_recovery_cache;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4402) err = f2fs_create_garbage_collection_cache();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4403) if (err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4404) goto free_extent_cache;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4405) err = f2fs_init_sysfs();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4406) if (err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4407) goto free_garbage_collection_cache;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4408) err = register_shrinker(&f2fs_shrinker_info);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4409) if (err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4410) goto free_sysfs;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4411) err = register_filesystem(&f2fs_fs_type);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4412) if (err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4413) goto free_shrinker;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4414) f2fs_create_root_stats();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4415) err = f2fs_init_post_read_processing();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4416) if (err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4417) goto free_root_stats;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4418) err = f2fs_init_bio_entry_cache();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4419) if (err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4420) goto free_post_read;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4421) err = f2fs_init_bioset();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4422) if (err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4423) goto free_bio_entry_cache;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4424) err = f2fs_init_compress_mempool();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4425) if (err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4426) goto free_bioset;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4427) err = f2fs_init_compress_cache();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4428) if (err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4429) goto free_compress_mempool;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4430) err = f2fs_create_casefold_cache();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4431) if (err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4432) goto free_compress_cache;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4433) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4434) free_compress_cache:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4435) f2fs_destroy_compress_cache();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4436) free_compress_mempool:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4437) f2fs_destroy_compress_mempool();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4438) free_bioset:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4439) f2fs_destroy_bioset();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4440) free_bio_entry_cache:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4441) f2fs_destroy_bio_entry_cache();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4442) free_post_read:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4443) f2fs_destroy_post_read_processing();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4444) free_root_stats:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4445) f2fs_destroy_root_stats();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4446) unregister_filesystem(&f2fs_fs_type);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4447) free_shrinker:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4448) unregister_shrinker(&f2fs_shrinker_info);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4449) free_sysfs:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4450) f2fs_exit_sysfs();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4451) free_garbage_collection_cache:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4452) f2fs_destroy_garbage_collection_cache();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4453) free_extent_cache:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4454) f2fs_destroy_extent_cache();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4455) free_recovery_cache:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4456) f2fs_destroy_recovery_cache();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4457) free_checkpoint_caches:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4458) f2fs_destroy_checkpoint_caches();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4459) free_segment_manager_caches:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4460) f2fs_destroy_segment_manager_caches();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4461) free_node_manager_caches:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4462) f2fs_destroy_node_manager_caches();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4463) free_inodecache:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4464) destroy_inodecache();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4465) fail:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4466) return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4467) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4468)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4469) static void __exit exit_f2fs_fs(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4470) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4471) f2fs_destroy_casefold_cache();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4472) f2fs_destroy_compress_cache();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4473) f2fs_destroy_compress_mempool();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4474) f2fs_destroy_bioset();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4475) f2fs_destroy_bio_entry_cache();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4476) f2fs_destroy_post_read_processing();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4477) f2fs_destroy_root_stats();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4478) unregister_filesystem(&f2fs_fs_type);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4479) unregister_shrinker(&f2fs_shrinker_info);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4480) f2fs_exit_sysfs();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4481) f2fs_destroy_garbage_collection_cache();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4482) f2fs_destroy_extent_cache();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4483) f2fs_destroy_recovery_cache();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4484) f2fs_destroy_checkpoint_caches();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4485) f2fs_destroy_segment_manager_caches();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4486) f2fs_destroy_node_manager_caches();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4487) destroy_inodecache();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4488) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4489)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4490) module_init(init_f2fs_fs)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4491) module_exit(exit_f2fs_fs)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4492)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4493) MODULE_AUTHOR("Samsung Electronics's Praesto Team");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4494) MODULE_DESCRIPTION("Flash Friendly File System");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4495) MODULE_LICENSE("GPL");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4496) MODULE_IMPORT_NS(ANDROID_GKI_VFS_EXPORT_ONLY);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4497) MODULE_SOFTDEP("pre: crc32");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4498)