// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (c) 2000-2006 Silicon Graphics, Inc.
 * All Rights Reserved.
 */

#include "xfs.h"
#include "xfs_shared.h"
#include "xfs_format.h"
#include "xfs_log_format.h"
#include "xfs_trans_resv.h"
#include "xfs_sb.h"
#include "xfs_mount.h"
#include "xfs_inode.h"
#include "xfs_btree.h"
#include "xfs_bmap.h"
#include "xfs_alloc.h"
#include "xfs_fsops.h"
#include "xfs_trans.h"
#include "xfs_buf_item.h"
#include "xfs_log.h"
#include "xfs_log_priv.h"
#include "xfs_dir2.h"
#include "xfs_extfree_item.h"
#include "xfs_mru_cache.h"
#include "xfs_inode_item.h"
#include "xfs_icache.h"
#include "xfs_trace.h"
#include "xfs_icreate_item.h"
#include "xfs_filestream.h"
#include "xfs_quota.h"
#include "xfs_sysfs.h"
#include "xfs_ondisk.h"
#include "xfs_rmap_item.h"
#include "xfs_refcount_item.h"
#include "xfs_bmap_item.h"
#include "xfs_reflink.h"

#include <linux/magic.h>
#include <linux/fs_context.h>
#include <linux/fs_parser.h>

static const struct super_operations xfs_super_operations;

static struct kset *xfs_kset;		/* top-level xfs sysfs dir */
#ifdef DEBUG
static struct xfs_kobj xfs_dbg_kobj;	/* global debug sysfs attrs */
#endif

enum xfs_dax_mode {
	XFS_DAX_INODE = 0,
	XFS_DAX_ALWAYS = 1,
	XFS_DAX_NEVER = 2,
};

static void
xfs_mount_set_dax_mode(
	struct xfs_mount	*mp,
	enum xfs_dax_mode	mode)
{
	switch (mode) {
	case XFS_DAX_INODE:
		mp->m_flags &= ~(XFS_MOUNT_DAX_ALWAYS | XFS_MOUNT_DAX_NEVER);
		break;
	case XFS_DAX_ALWAYS:
		mp->m_flags |= XFS_MOUNT_DAX_ALWAYS;
		mp->m_flags &= ~XFS_MOUNT_DAX_NEVER;
		break;
	case XFS_DAX_NEVER:
		mp->m_flags |= XFS_MOUNT_DAX_NEVER;
		mp->m_flags &= ~XFS_MOUNT_DAX_ALWAYS;
		break;
	}
}

static const struct constant_table dax_param_enums[] = {
	{"inode",	XFS_DAX_INODE },
	{"always",	XFS_DAX_ALWAYS },
	{"never",	XFS_DAX_NEVER },
	{}
};

/*
 * Table driven mount option parser.
 */
enum {
	Opt_logbufs, Opt_logbsize, Opt_logdev, Opt_rtdev,
	Opt_wsync, Opt_noalign, Opt_swalloc, Opt_sunit, Opt_swidth, Opt_nouuid,
	Opt_grpid, Opt_nogrpid, Opt_bsdgroups, Opt_sysvgroups,
	Opt_allocsize, Opt_norecovery, Opt_inode64, Opt_inode32, Opt_ikeep,
	Opt_noikeep, Opt_largeio, Opt_nolargeio, Opt_attr2, Opt_noattr2,
	Opt_filestreams, Opt_quota, Opt_noquota, Opt_usrquota, Opt_grpquota,
	Opt_prjquota, Opt_uquota, Opt_gquota, Opt_pquota,
	Opt_uqnoenforce, Opt_gqnoenforce, Opt_pqnoenforce, Opt_qnoenforce,
	Opt_discard, Opt_nodiscard, Opt_dax, Opt_dax_enum,
};

static const struct fs_parameter_spec xfs_fs_parameters[] = {
	fsparam_u32("logbufs",		Opt_logbufs),
	fsparam_string("logbsize",	Opt_logbsize),
	fsparam_string("logdev",	Opt_logdev),
	fsparam_string("rtdev",		Opt_rtdev),
	fsparam_flag("wsync",		Opt_wsync),
	fsparam_flag("noalign",		Opt_noalign),
	fsparam_flag("swalloc",		Opt_swalloc),
	fsparam_u32("sunit",		Opt_sunit),
	fsparam_u32("swidth",		Opt_swidth),
	fsparam_flag("nouuid",		Opt_nouuid),
	fsparam_flag("grpid",		Opt_grpid),
	fsparam_flag("nogrpid",		Opt_nogrpid),
	fsparam_flag("bsdgroups",	Opt_bsdgroups),
	fsparam_flag("sysvgroups",	Opt_sysvgroups),
	fsparam_string("allocsize",	Opt_allocsize),
	fsparam_flag("norecovery",	Opt_norecovery),
	fsparam_flag("inode64",		Opt_inode64),
	fsparam_flag("inode32",		Opt_inode32),
	fsparam_flag("ikeep",		Opt_ikeep),
	fsparam_flag("noikeep",		Opt_noikeep),
	fsparam_flag("largeio",		Opt_largeio),
	fsparam_flag("nolargeio",	Opt_nolargeio),
	fsparam_flag("attr2",		Opt_attr2),
	fsparam_flag("noattr2",		Opt_noattr2),
	fsparam_flag("filestreams",	Opt_filestreams),
	fsparam_flag("quota",		Opt_quota),
	fsparam_flag("noquota",		Opt_noquota),
	fsparam_flag("usrquota",	Opt_usrquota),
	fsparam_flag("grpquota",	Opt_grpquota),
	fsparam_flag("prjquota",	Opt_prjquota),
	fsparam_flag("uquota",		Opt_uquota),
	fsparam_flag("gquota",		Opt_gquota),
	fsparam_flag("pquota",		Opt_pquota),
	fsparam_flag("uqnoenforce",	Opt_uqnoenforce),
	fsparam_flag("gqnoenforce",	Opt_gqnoenforce),
	fsparam_flag("pqnoenforce",	Opt_pqnoenforce),
	fsparam_flag("qnoenforce",	Opt_qnoenforce),
	fsparam_flag("discard",		Opt_discard),
	fsparam_flag("nodiscard",	Opt_nodiscard),
	fsparam_flag("dax",		Opt_dax),
	fsparam_enum("dax",		Opt_dax_enum, dax_param_enums),
	{}
};

struct proc_xfs_info {
	uint64_t	flag;
	char		*str;
};

static int
xfs_fs_show_options(
	struct seq_file		*m,
	struct dentry		*root)
{
	static struct proc_xfs_info xfs_info_set[] = {
		/* the few simple ones we can get from the mount struct */
		{ XFS_MOUNT_IKEEP,		",ikeep" },
		{ XFS_MOUNT_WSYNC,		",wsync" },
		{ XFS_MOUNT_NOALIGN,		",noalign" },
		{ XFS_MOUNT_SWALLOC,		",swalloc" },
		{ XFS_MOUNT_NOUUID,		",nouuid" },
		{ XFS_MOUNT_NORECOVERY,		",norecovery" },
		{ XFS_MOUNT_ATTR2,		",attr2" },
		{ XFS_MOUNT_FILESTREAMS,	",filestreams" },
		{ XFS_MOUNT_GRPID,		",grpid" },
		{ XFS_MOUNT_DISCARD,		",discard" },
		{ XFS_MOUNT_LARGEIO,		",largeio" },
		{ XFS_MOUNT_DAX_ALWAYS,		",dax=always" },
		{ XFS_MOUNT_DAX_NEVER,		",dax=never" },
		{ 0, NULL }
	};
	struct xfs_mount	*mp = XFS_M(root->d_sb);
	struct proc_xfs_info	*xfs_infop;

	for (xfs_infop = xfs_info_set; xfs_infop->flag; xfs_infop++) {
		if (mp->m_flags & xfs_infop->flag)
			seq_puts(m, xfs_infop->str);
	}

	seq_printf(m, ",inode%d",
		(mp->m_flags & XFS_MOUNT_SMALL_INUMS) ? 32 : 64);

	if (mp->m_flags & XFS_MOUNT_ALLOCSIZE)
		seq_printf(m, ",allocsize=%dk",
			   (1 << mp->m_allocsize_log) >> 10);

	if (mp->m_logbufs > 0)
		seq_printf(m, ",logbufs=%d", mp->m_logbufs);
	if (mp->m_logbsize > 0)
		seq_printf(m, ",logbsize=%dk", mp->m_logbsize >> 10);

	if (mp->m_logname)
		seq_show_option(m, "logdev", mp->m_logname);
	if (mp->m_rtname)
		seq_show_option(m, "rtdev", mp->m_rtname);

	if (mp->m_dalign > 0)
		seq_printf(m, ",sunit=%d",
				(int)XFS_FSB_TO_BB(mp, mp->m_dalign));
	if (mp->m_swidth > 0)
		seq_printf(m, ",swidth=%d",
				(int)XFS_FSB_TO_BB(mp, mp->m_swidth));

	if (mp->m_qflags & (XFS_UQUOTA_ACCT|XFS_UQUOTA_ENFD))
		seq_puts(m, ",usrquota");
	else if (mp->m_qflags & XFS_UQUOTA_ACCT)
		seq_puts(m, ",uqnoenforce");

	if (mp->m_qflags & XFS_PQUOTA_ACCT) {
		if (mp->m_qflags & XFS_PQUOTA_ENFD)
			seq_puts(m, ",prjquota");
		else
			seq_puts(m, ",pqnoenforce");
	}
	if (mp->m_qflags & XFS_GQUOTA_ACCT) {
		if (mp->m_qflags & XFS_GQUOTA_ENFD)
			seq_puts(m, ",grpquota");
		else
			seq_puts(m, ",gqnoenforce");
	}

	if (!(mp->m_qflags & XFS_ALL_QUOTA_ACCT))
		seq_puts(m, ",noquota");

	return 0;
}

/*
 * Set parameters for inode allocation heuristics, taking into account
 * filesystem size and inode32/inode64 mount options; i.e. specifically
 * whether or not XFS_MOUNT_SMALL_INUMS is set.
 *
 * Inode allocation patterns are altered only if inode32 is requested
 * (XFS_MOUNT_SMALL_INUMS), and the filesystem is sufficiently large.
 * If altered, XFS_MOUNT_32BITINODES is set as well.
 *
 * An agcount independent of that in the mount structure is provided
 * because in the growfs case, mp->m_sb.sb_agcount is not yet updated
 * to the potentially higher ag count.
 *
 * Returns the maximum AG index which may contain inodes.
 */
xfs_agnumber_t
xfs_set_inode_alloc(
	struct xfs_mount *mp,
	xfs_agnumber_t	agcount)
{
	xfs_agnumber_t	index;
	xfs_agnumber_t	maxagi = 0;
	xfs_sb_t	*sbp = &mp->m_sb;
	xfs_agnumber_t	max_metadata;
	xfs_agino_t	agino;
	xfs_ino_t	ino;

	/*
	 * Calculate how much should be reserved for inodes to meet
	 * the max inode percentage.  Used only for inode32.
	 */
	if (M_IGEO(mp)->maxicount) {
		uint64_t	icount;

		icount = sbp->sb_dblocks * sbp->sb_imax_pct;
		do_div(icount, 100);
		icount += sbp->sb_agblocks - 1;
		do_div(icount, sbp->sb_agblocks);
		max_metadata = icount;
	} else {
		max_metadata = agcount;
	}

	/* Get the last possible inode in the filesystem */
	agino = XFS_AGB_TO_AGINO(mp, sbp->sb_agblocks - 1);
	ino = XFS_AGINO_TO_INO(mp, agcount - 1, agino);

	/*
	 * If user asked for no more than 32-bit inodes, and the fs is
	 * sufficiently large, set XFS_MOUNT_32BITINODES if we must alter
	 * the allocator to accommodate the request.
	 */
	if ((mp->m_flags & XFS_MOUNT_SMALL_INUMS) && ino > XFS_MAXINUMBER_32)
		mp->m_flags |= XFS_MOUNT_32BITINODES;
	else
		mp->m_flags &= ~XFS_MOUNT_32BITINODES;

	for (index = 0; index < agcount; index++) {
		struct xfs_perag	*pag;

		ino = XFS_AGINO_TO_INO(mp, index, agino);

		pag = xfs_perag_get(mp, index);

		if (mp->m_flags & XFS_MOUNT_32BITINODES) {
			if (ino > XFS_MAXINUMBER_32) {
				pag->pagi_inodeok = 0;
				pag->pagf_metadata = 0;
			} else {
				pag->pagi_inodeok = 1;
				maxagi++;
				if (index < max_metadata)
					pag->pagf_metadata = 1;
				else
					pag->pagf_metadata = 0;
			}
		} else {
			pag->pagi_inodeok = 1;
			pag->pagf_metadata = 0;
		}

		xfs_perag_put(pag);
	}

	return (mp->m_flags & XFS_MOUNT_32BITINODES) ? maxagi : agcount;
}

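/*
 * Open the named block device exclusively for this mount, logging a warning
 * and returning the error if the open fails.
 */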
STATIC int
xfs_blkdev_get(
	xfs_mount_t		*mp,
	const char		*name,
	struct block_device	**bdevp)
{
	int			error = 0;

	*bdevp = blkdev_get_by_path(name, FMODE_READ|FMODE_WRITE|FMODE_EXCL,
				    mp);
	if (IS_ERR(*bdevp)) {
		error = PTR_ERR(*bdevp);
		xfs_warn(mp, "Invalid device [%s], error=%d", name, error);
	}

	return error;
}

STATIC void
xfs_blkdev_put(
	struct block_device	*bdev)
{
	if (bdev)
		blkdev_put(bdev, FMODE_READ|FMODE_WRITE|FMODE_EXCL);
}

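/* Flush the write cache of the block device backing a buffer target. */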
void
xfs_blkdev_issue_flush(
	xfs_buftarg_t		*buftarg)
{
	blkdev_issue_flush(buftarg->bt_bdev, GFP_NOFS);
}

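/*
 * Release the buffer targets and drop the block device and DAX device
 * references taken when the devices were opened.
 */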
STATIC void
xfs_close_devices(
	struct xfs_mount	*mp)
{
	struct dax_device *dax_ddev = mp->m_ddev_targp->bt_daxdev;

	if (mp->m_logdev_targp && mp->m_logdev_targp != mp->m_ddev_targp) {
		struct block_device *logdev = mp->m_logdev_targp->bt_bdev;
		struct dax_device *dax_logdev = mp->m_logdev_targp->bt_daxdev;

		xfs_free_buftarg(mp->m_logdev_targp);
		xfs_blkdev_put(logdev);
		fs_put_dax(dax_logdev);
	}
	if (mp->m_rtdev_targp) {
		struct block_device *rtdev = mp->m_rtdev_targp->bt_bdev;
		struct dax_device *dax_rtdev = mp->m_rtdev_targp->bt_daxdev;

		xfs_free_buftarg(mp->m_rtdev_targp);
		xfs_blkdev_put(rtdev);
		fs_put_dax(dax_rtdev);
	}
	xfs_free_buftarg(mp->m_ddev_targp);
	fs_put_dax(dax_ddev);
}

/*
 * The file system configurations are:
 *	(1) device (partition) with data and internal log
 *	(2) logical volume with data and log subvolumes.
 *	(3) logical volume with data, log, and realtime subvolumes.
 *
 * We only have to handle opening the log and realtime volumes here if
 * they are present.  The data subvolume has already been opened by
 * get_sb_bdev() and is stored in sb->s_bdev.
 */
STATIC int
xfs_open_devices(
	struct xfs_mount	*mp)
{
	struct block_device	*ddev = mp->m_super->s_bdev;
	struct dax_device	*dax_ddev = fs_dax_get_by_bdev(ddev);
	struct dax_device	*dax_logdev = NULL, *dax_rtdev = NULL;
	struct block_device	*logdev = NULL, *rtdev = NULL;
	int			error;

	/*
	 * Open real time and log devices - order is important.
	 */
	if (mp->m_logname) {
		error = xfs_blkdev_get(mp, mp->m_logname, &logdev);
		if (error)
			goto out;
		dax_logdev = fs_dax_get_by_bdev(logdev);
	}

	if (mp->m_rtname) {
		error = xfs_blkdev_get(mp, mp->m_rtname, &rtdev);
		if (error)
			goto out_close_logdev;

		if (rtdev == ddev || rtdev == logdev) {
			xfs_warn(mp,
	"Cannot mount filesystem with identical rtdev and ddev/logdev.");
			error = -EINVAL;
			goto out_close_rtdev;
		}
		dax_rtdev = fs_dax_get_by_bdev(rtdev);
	}

	/*
	 * Setup xfs_mount buffer target pointers
	 */
	error = -ENOMEM;
	mp->m_ddev_targp = xfs_alloc_buftarg(mp, ddev, dax_ddev);
	if (!mp->m_ddev_targp)
		goto out_close_rtdev;

	if (rtdev) {
		mp->m_rtdev_targp = xfs_alloc_buftarg(mp, rtdev, dax_rtdev);
		if (!mp->m_rtdev_targp)
			goto out_free_ddev_targ;
	}

	if (logdev && logdev != ddev) {
		mp->m_logdev_targp = xfs_alloc_buftarg(mp, logdev, dax_logdev);
		if (!mp->m_logdev_targp)
			goto out_free_rtdev_targ;
	} else {
		mp->m_logdev_targp = mp->m_ddev_targp;
	}

	return 0;

 out_free_rtdev_targ:
	if (mp->m_rtdev_targp)
		xfs_free_buftarg(mp->m_rtdev_targp);
 out_free_ddev_targ:
	xfs_free_buftarg(mp->m_ddev_targp);
 out_close_rtdev:
	xfs_blkdev_put(rtdev);
	fs_put_dax(dax_rtdev);
 out_close_logdev:
	if (logdev && logdev != ddev) {
		xfs_blkdev_put(logdev);
		fs_put_dax(dax_logdev);
	}
 out:
	fs_put_dax(dax_ddev);
	return error;
}

/*
 * Setup xfs_mount buffer target pointers based on superblock
 */
STATIC int
xfs_setup_devices(
	struct xfs_mount	*mp)
{
	int			error;

	error = xfs_setsize_buftarg(mp->m_ddev_targp, mp->m_sb.sb_sectsize);
	if (error)
		return error;

	if (mp->m_logdev_targp && mp->m_logdev_targp != mp->m_ddev_targp) {
		unsigned int	log_sector_size = BBSIZE;

		if (xfs_sb_version_hassector(&mp->m_sb))
			log_sector_size = mp->m_sb.sb_logsectsize;
		error = xfs_setsize_buftarg(mp->m_logdev_targp,
					    log_sector_size);
		if (error)
			return error;
	}
	if (mp->m_rtdev_targp) {
		error = xfs_setsize_buftarg(mp->m_rtdev_targp,
					    mp->m_sb.sb_sectsize);
		if (error)
			return error;
	}

	return 0;
}

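/*
 * Create the per-mount workqueues. All are freezable, and all but the sync
 * workqueue are also marked WQ_MEM_RECLAIM so they can make forward
 * progress during memory reclaim.
 */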
STATIC int
xfs_init_mount_workqueues(
	struct xfs_mount	*mp)
{
	mp->m_buf_workqueue = alloc_workqueue("xfs-buf/%s",
			WQ_MEM_RECLAIM|WQ_FREEZABLE, 1, mp->m_super->s_id);
	if (!mp->m_buf_workqueue)
		goto out;

	mp->m_unwritten_workqueue = alloc_workqueue("xfs-conv/%s",
			WQ_MEM_RECLAIM|WQ_FREEZABLE, 0, mp->m_super->s_id);
	if (!mp->m_unwritten_workqueue)
		goto out_destroy_buf;

	mp->m_cil_workqueue = alloc_workqueue("xfs-cil/%s",
			WQ_MEM_RECLAIM | WQ_FREEZABLE | WQ_UNBOUND,
			0, mp->m_super->s_id);
	if (!mp->m_cil_workqueue)
		goto out_destroy_unwritten;

	mp->m_reclaim_workqueue = alloc_workqueue("xfs-reclaim/%s",
			WQ_MEM_RECLAIM|WQ_FREEZABLE, 0, mp->m_super->s_id);
	if (!mp->m_reclaim_workqueue)
		goto out_destroy_cil;

	mp->m_eofblocks_workqueue = alloc_workqueue("xfs-eofblocks/%s",
			WQ_MEM_RECLAIM|WQ_FREEZABLE, 0, mp->m_super->s_id);
	if (!mp->m_eofblocks_workqueue)
		goto out_destroy_reclaim;

	mp->m_sync_workqueue = alloc_workqueue("xfs-sync/%s", WQ_FREEZABLE, 0,
					       mp->m_super->s_id);
	if (!mp->m_sync_workqueue)
		goto out_destroy_eofb;

	return 0;

out_destroy_eofb:
	destroy_workqueue(mp->m_eofblocks_workqueue);
out_destroy_reclaim:
	destroy_workqueue(mp->m_reclaim_workqueue);
out_destroy_cil:
	destroy_workqueue(mp->m_cil_workqueue);
out_destroy_unwritten:
	destroy_workqueue(mp->m_unwritten_workqueue);
out_destroy_buf:
	destroy_workqueue(mp->m_buf_workqueue);
out:
	return -ENOMEM;
}

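/* Tear down the per-mount workqueues in the reverse order of creation. */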
STATIC void
xfs_destroy_mount_workqueues(
	struct xfs_mount	*mp)
{
	destroy_workqueue(mp->m_sync_workqueue);
	destroy_workqueue(mp->m_eofblocks_workqueue);
	destroy_workqueue(mp->m_reclaim_workqueue);
	destroy_workqueue(mp->m_cil_workqueue);
	destroy_workqueue(mp->m_unwritten_workqueue);
	destroy_workqueue(mp->m_buf_workqueue);
}

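/*
 * Background work that writes back all dirty inodes on the superblock. The
 * flush is skipped entirely if the s_umount lock cannot be taken without
 * blocking.
 */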
static void
xfs_flush_inodes_worker(
	struct work_struct	*work)
{
	struct xfs_mount	*mp = container_of(work, struct xfs_mount,
						   m_flush_inodes_work);
	struct super_block	*sb = mp->m_super;

	if (down_read_trylock(&sb->s_umount)) {
		sync_inodes_sb(sb);
		up_read(&sb->s_umount);
	}
}

/*
 * Flush all dirty data to disk. Must not be called while holding an XFS_ILOCK
 * or a page lock. We use sync_inodes_sb() here to ensure we block while waiting
 * for IO to complete so that we effectively throttle multiple callers to the
 * rate at which IO is completing.
 */
void
xfs_flush_inodes(
	struct xfs_mount	*mp)
{
	/*
	 * If flush_work() returns true then that means we waited for a flush
	 * which was already in progress.  Don't bother running another scan.
	 */
	if (flush_work(&mp->m_flush_inodes_work))
		return;

	queue_work(mp->m_sync_workqueue, &mp->m_flush_inodes_work);
	flush_work(&mp->m_flush_inodes_work);
}

/* Catch misguided souls that try to use this interface on XFS */
STATIC struct inode *
xfs_fs_alloc_inode(
	struct super_block	*sb)
{
	BUG();
	return NULL;
}

#ifdef DEBUG
static void
xfs_check_delalloc(
	struct xfs_inode	*ip,
	int			whichfork)
{
	struct xfs_ifork	*ifp = XFS_IFORK_PTR(ip, whichfork);
	struct xfs_bmbt_irec	got;
	struct xfs_iext_cursor	icur;

	if (!ifp || !xfs_iext_lookup_extent(ip, ifp, 0, &icur, &got))
		return;
	do {
		if (isnullstartblock(got.br_startblock)) {
			xfs_warn(ip->i_mount,
	"ino %llx %s fork has delalloc extent at [0x%llx:0x%llx]",
				ip->i_ino,
				whichfork == XFS_DATA_FORK ? "data" : "cow",
				got.br_startoff, got.br_blockcount);
		}
	} while (xfs_iext_next_extent(ifp, &icur, &got));
}
#else
#define xfs_check_delalloc(ip, whichfork)	do { } while (0)
#endif

/*
 * Now that the generic code is guaranteed not to be accessing
 * the linux inode, we can inactivate and reclaim the inode.
 */
STATIC void
xfs_fs_destroy_inode(
	struct inode		*inode)
{
	struct xfs_inode	*ip = XFS_I(inode);

	trace_xfs_destroy_inode(ip);

	ASSERT(!rwsem_is_locked(&inode->i_rwsem));
	XFS_STATS_INC(ip->i_mount, vn_rele);
	XFS_STATS_INC(ip->i_mount, vn_remove);

	xfs_inactive(ip);

	if (!XFS_FORCED_SHUTDOWN(ip->i_mount) && ip->i_delayed_blks) {
		xfs_check_delalloc(ip, XFS_DATA_FORK);
		xfs_check_delalloc(ip, XFS_COW_FORK);
		ASSERT(0);
	}

	XFS_STATS_INC(ip->i_mount, vn_reclaim);

	/*
	 * We should never get here with one of the reclaim flags already set.
	 */
	ASSERT_ALWAYS(!xfs_iflags_test(ip, XFS_IRECLAIMABLE));
	ASSERT_ALWAYS(!xfs_iflags_test(ip, XFS_IRECLAIM));

	/*
	 * We always use background reclaim here because even if the inode is
	 * clean, it still may be under IO and hence we have to wait for IO
	 * completion to occur before we can reclaim the inode. The background
	 * reclaim path handles this more efficiently than we can here, so
	 * simply let background reclaim tear down all inodes.
	 */
	xfs_inode_set_reclaim_tag(ip);
}

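/*
 * ->dirty_inode hook. On lazytime mounts, a timestamp-only sync
 * (I_DIRTY_SYNC on an inode that is still I_DIRTY_TIME) is made durable by
 * logging the inode timestamps in a small transaction; all other dirtying
 * is ignored here.
 */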
static void
xfs_fs_dirty_inode(
	struct inode			*inode,
	int				flag)
{
	struct xfs_inode		*ip = XFS_I(inode);
	struct xfs_mount		*mp = ip->i_mount;
	struct xfs_trans		*tp;

	if (!(inode->i_sb->s_flags & SB_LAZYTIME))
		return;
	if (flag != I_DIRTY_SYNC || !(inode->i_state & I_DIRTY_TIME))
		return;

	if (xfs_trans_alloc(mp, &M_RES(mp)->tr_fsyncts, 0, 0, 0, &tp))
		return;
	xfs_ilock(ip, XFS_ILOCK_EXCL);
	xfs_trans_ijoin(tp, ip, XFS_ILOCK_EXCL);
	xfs_trans_log_inode(tp, ip, XFS_ILOG_TIMESTAMP);
	xfs_trans_commit(tp);
}

/*
 * Slab object creation initialisation for the XFS inode.
 * This covers only the idempotent fields in the XFS inode;
 * all other fields need to be initialised on allocation
 * from the slab. This avoids the need to repeatedly initialise
 * fields in the xfs inode that are left in the initialised state
 * when freeing the inode.
 */
STATIC void
xfs_fs_inode_init_once(
	void			*inode)
{
	struct xfs_inode	*ip = inode;

	memset(ip, 0, sizeof(struct xfs_inode));

	/* vfs inode */
	inode_init_once(VFS_I(ip));

	/* xfs inode */
	atomic_set(&ip->i_pincount, 0);
	spin_lock_init(&ip->i_flags_lock);

	mrlock_init(&ip->i_mmaplock, MRLOCK_ALLOW_EQUAL_PRI|MRLOCK_BARRIER,
		     "xfsino", ip->i_ino);
	mrlock_init(&ip->i_lock, MRLOCK_ALLOW_EQUAL_PRI|MRLOCK_BARRIER,
		     "xfsino", ip->i_ino);
}

/*
 * We do an unlocked check for XFS_IDONTCACHE here because we are already
 * serialised against cache hits here via the inode->i_lock and igrab() in
 * xfs_iget_cache_hit(). Hence a lookup that might clear this flag will not be
 * racing with us, and it avoids needing to grab a spinlock here for every inode
 * we drop the final reference on.
 */
STATIC int
xfs_fs_drop_inode(
	struct inode		*inode)
{
	struct xfs_inode	*ip = XFS_I(inode);

	/*
	 * If this unlinked inode is in the middle of recovery, don't
	 * drop the inode just yet; log recovery will take care of
	 * that.  See the comment for this inode flag.
	 */
	if (ip->i_flags & XFS_IRECOVERY) {
		ASSERT(ip->i_mount->m_log->l_flags & XLOG_RECOVERY_NEEDED);
		return 0;
	}

	return generic_drop_inode(inode);
}

static void
xfs_mount_free(
	struct xfs_mount	*mp)
{
	kfree(mp->m_rtname);
	kfree(mp->m_logname);
	kmem_free(mp);
}

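/*
 * ->sync_fs hook: nothing to do on the async pass; on the sync pass force
 * the log to disk and, in laptop mode, kick the log work immediately while
 * the disk is known to be active.
 */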
STATIC int
xfs_fs_sync_fs(
	struct super_block	*sb,
	int			wait)
{
	struct xfs_mount	*mp = XFS_M(sb);

	/*
	 * Doing anything during the async pass would be counterproductive.
	 */
	if (!wait)
		return 0;

	xfs_log_force(mp, XFS_LOG_SYNC);
	if (laptop_mode) {
		/*
		 * The disk must be active because we're syncing.
		 * We schedule log work now (now that the disk is
		 * active) instead of later (when it might not be).
		 */
		flush_delayed_work(&mp->m_log->l_work);
	}

	return 0;
}

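/*
 * Fill in statfs data. Inode and free block counts are summed from the
 * per-cpu counters; project quota limits and the realtime device can
 * override the figures reported for individual inodes.
 */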
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 778) STATIC int
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 779) xfs_fs_statfs(
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 780) struct dentry *dentry,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 781) struct kstatfs *statp)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 782) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 783) struct xfs_mount *mp = XFS_M(dentry->d_sb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 784) xfs_sb_t *sbp = &mp->m_sb;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 785) struct xfs_inode *ip = XFS_I(d_inode(dentry));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 786) uint64_t fakeinos, id;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 787) uint64_t icount;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 788) uint64_t ifree;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 789) uint64_t fdblocks;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 790) xfs_extlen_t lsize;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 791) int64_t ffree;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 792)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 793) statp->f_type = XFS_SUPER_MAGIC;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 794) statp->f_namelen = MAXNAMELEN - 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 795)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 796) id = huge_encode_dev(mp->m_ddev_targp->bt_dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 797) statp->f_fsid = u64_to_fsid(id);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 798)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 799) icount = percpu_counter_sum(&mp->m_icount);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 800) ifree = percpu_counter_sum(&mp->m_ifree);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 801) fdblocks = percpu_counter_sum(&mp->m_fdblocks);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 802)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 803) spin_lock(&mp->m_sb_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 804) statp->f_bsize = sbp->sb_blocksize;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 805) lsize = sbp->sb_logstart ? sbp->sb_logblocks : 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 806) statp->f_blocks = sbp->sb_dblocks - lsize;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 807) spin_unlock(&mp->m_sb_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 808)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 809) /* make sure statp->f_bfree does not underflow */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 810) statp->f_bfree = max_t(int64_t, fdblocks - mp->m_alloc_set_aside, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 811) statp->f_bavail = statp->f_bfree;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 812)
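	/*
	 * Report the inode count as the inodes already allocated plus
	 * however many more could be created from the remaining free
	 * space, capped at XFS_MAXINUMBER and, if set, the inode
	 * geometry's maxicount limit.
	 */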
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 813) fakeinos = XFS_FSB_TO_INO(mp, statp->f_bfree);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 814) statp->f_files = min(icount + fakeinos, (uint64_t)XFS_MAXINUMBER);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 815) if (M_IGEO(mp)->maxicount)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 816) statp->f_files = min_t(typeof(statp->f_files),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 817) statp->f_files,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 818) M_IGEO(mp)->maxicount);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 819)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 820) /* If sb_icount overshot maxicount, report actual allocation */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 821) statp->f_files = max_t(typeof(statp->f_files),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 822) statp->f_files,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 823) sbp->sb_icount);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 824)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 825) /* make sure statp->f_ffree does not underflow */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 826) ffree = statp->f_files - (icount - ifree);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 827) statp->f_ffree = max_t(int64_t, ffree, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 828)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 829)
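	/*
	 * If project quotas are enforced on this directory tree, report
	 * the project quota limits instead of the filesystem-wide values.
	 */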
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 830) if ((ip->i_d.di_flags & XFS_DIFLAG_PROJINHERIT) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 831) ((mp->m_qflags & (XFS_PQUOTA_ACCT|XFS_PQUOTA_ENFD))) ==
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 832) (XFS_PQUOTA_ACCT|XFS_PQUOTA_ENFD))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 833) xfs_qm_statvfs(ip, statp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 834)
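	/*
	 * For realtime files (and directories that inherit the realtime
	 * flag), report the space of the realtime device rather than the
	 * data device.
	 */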
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 835) if (XFS_IS_REALTIME_MOUNT(mp) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 836) (ip->i_d.di_flags & (XFS_DIFLAG_RTINHERIT | XFS_DIFLAG_REALTIME))) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 837) statp->f_blocks = sbp->sb_rblocks;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 838) statp->f_bavail = statp->f_bfree =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 839) sbp->sb_frextents * sbp->sb_rextsize;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 840) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 841)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 842) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 843) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 844)
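/*
 * xfs_save_resvblks() remembers the current reserve block pool and releases
 * it; xfs_restore_resvblks() re-establishes the saved reservation, falling
 * back to the default if nothing was saved. The freeze and unfreeze paths
 * below use them as a pair.
 */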
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 845) STATIC void
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 846) xfs_save_resvblks(struct xfs_mount *mp)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 847) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 848) uint64_t resblks = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 849)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 850) mp->m_resblks_save = mp->m_resblks;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 851) xfs_reserve_blocks(mp, &resblks, NULL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 852) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 853)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 854) STATIC void
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 855) xfs_restore_resvblks(struct xfs_mount *mp)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 856) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 857) uint64_t resblks;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 858)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 859) if (mp->m_resblks_save) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 860) resblks = mp->m_resblks_save;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 861) mp->m_resblks_save = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 862) } else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 863) resblks = xfs_default_resblks(mp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 864)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 865) xfs_reserve_blocks(mp, &resblks, NULL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 866) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 867)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 868) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 869) * Trigger writeback of all the dirty metadata in the file system.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 870) *
 * This ensures that the metadata is written to its final location on disk
 * rather than just existing in transactions in the log. This means after a
 * quiesce there is no log replay required to write the inodes to disk - this
 * is the primary difference between a sync and a quiesce.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 875) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 876) * We cancel log work early here to ensure all transactions the log worker may
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 877) * run have finished before we clean up and log the superblock and write an
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 878) * unmount record. The unfreeze process is responsible for restarting the log
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 879) * worker correctly.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 880) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 881) void
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 882) xfs_quiesce_attr(
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 883) struct xfs_mount *mp)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 884) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 885) int error = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 886)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 887) cancel_delayed_work_sync(&mp->m_log->l_work);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 888)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 889) /* force the log to unpin objects from the now complete transactions */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 890) xfs_log_force(mp, XFS_LOG_SYNC);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 891)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 892)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 893) /* Push the superblock and write an unmount record */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 894) error = xfs_log_sbcount(mp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 895) if (error)
		xfs_warn(mp, "xfs_quiesce_attr: failed to log sb changes. "
				"Frozen image may not be consistent.");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 898) xfs_log_quiesce(mp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 899) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 900)
/*
 * Second stage of a freeze. The data is already frozen so we only
 * need to take care of the metadata. Once that's done, sync the superblock
 * to the log to dirty it in case of a crash while frozen. This ensures that we
 * will recover the unlinked inode lists on the next mount.
 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 907) STATIC int
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 908) xfs_fs_freeze(
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 909) struct super_block *sb)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 910) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 911) struct xfs_mount *mp = XFS_M(sb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 912) unsigned int flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 913) int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 914)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 915) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 916) * The filesystem is now frozen far enough that memory reclaim
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 917) * cannot safely operate on the filesystem. Hence we need to
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 918) * set a GFP_NOFS context here to avoid recursion deadlocks.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 919) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 920) flags = memalloc_nofs_save();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 921) xfs_stop_block_reaping(mp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 922) xfs_save_resvblks(mp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 923) xfs_quiesce_attr(mp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 924) ret = xfs_sync_sb(mp, true);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 925) memalloc_nofs_restore(flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 926) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 927) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 928)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 929) STATIC int
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 930) xfs_fs_unfreeze(
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 931) struct super_block *sb)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 932) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 933) struct xfs_mount *mp = XFS_M(sb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 934)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 935) xfs_restore_resvblks(mp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 936) xfs_log_work_queue(mp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 937) xfs_start_block_reaping(mp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 938) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 939) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 940)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 941) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 942) * This function fills in xfs_mount_t fields based on mount args.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 943) * Note: the superblock _has_ now been read in.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 944) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 945) STATIC int
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 946) xfs_finish_flags(
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 947) struct xfs_mount *mp)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 948) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 949) int ronly = (mp->m_flags & XFS_MOUNT_RDONLY);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 950)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 951) /* Fail a mount where the logbuf is smaller than the log stripe */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 952) if (xfs_sb_version_haslogv2(&mp->m_sb)) {
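		/*
		 * If no logbsize was specified, grow it to cover a log
		 * stripe unit larger than the default 32k log record size.
		 */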
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 953) if (mp->m_logbsize <= 0 &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 954) mp->m_sb.sb_logsunit > XLOG_BIG_RECORD_BSIZE) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 955) mp->m_logbsize = mp->m_sb.sb_logsunit;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 956) } else if (mp->m_logbsize > 0 &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 957) mp->m_logbsize < mp->m_sb.sb_logsunit) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 958) xfs_warn(mp,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 959) "logbuf size must be greater than or equal to log stripe size");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 960) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 961) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 962) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 963) /* Fail a mount if the logbuf is larger than 32K */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 964) if (mp->m_logbsize > XLOG_BIG_RECORD_BSIZE) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 965) xfs_warn(mp,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 966) "logbuf size for version 1 logs must be 16K or 32K");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 967) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 968) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 969) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 970)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 971) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 972) * V5 filesystems always use attr2 format for attributes.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 973) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 974) if (xfs_sb_version_hascrc(&mp->m_sb) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 975) (mp->m_flags & XFS_MOUNT_NOATTR2)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 976) xfs_warn(mp, "Cannot mount a V5 filesystem as noattr2. "
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 977) "attr2 is always enabled for V5 filesystems.");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 978) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 979) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 980)
	/*
	 * An on-disk attr2 feature bit set by mkfs enables the attr2 mount
	 * option unless it is explicitly disabled with noattr2.
	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 985) if (xfs_sb_version_hasattr2(&mp->m_sb) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 986) !(mp->m_flags & XFS_MOUNT_NOATTR2))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 987) mp->m_flags |= XFS_MOUNT_ATTR2;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 988)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 989) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 990) * prohibit r/w mounts of read-only filesystems
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 991) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 992) if ((mp->m_sb.sb_flags & XFS_SBF_READONLY) && !ronly) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 993) xfs_warn(mp,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 994) "cannot mount a read-only filesystem as read-write");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 995) return -EROFS;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 996) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 997)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 998) if ((mp->m_qflags & (XFS_GQUOTA_ACCT | XFS_GQUOTA_ACTIVE)) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 999) (mp->m_qflags & (XFS_PQUOTA_ACCT | XFS_PQUOTA_ACTIVE)) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1000) !xfs_sb_version_has_pquotino(&mp->m_sb)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1001) xfs_warn(mp,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1002) "Super block does not support project and group quota together");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1003) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1004) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1005)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1006) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1007) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1008)
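/*
 * Set up the per-cpu superblock counters (inode count, free inodes, free
 * data blocks) and the delayed allocation block counter, unwinding any
 * already-initialized counters if a later allocation fails.
 */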
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1009) static int
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1010) xfs_init_percpu_counters(
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1011) struct xfs_mount *mp)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1012) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1013) int error;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1014)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1015) error = percpu_counter_init(&mp->m_icount, 0, GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1016) if (error)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1017) return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1018)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1019) error = percpu_counter_init(&mp->m_ifree, 0, GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1020) if (error)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1021) goto free_icount;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1022)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1023) error = percpu_counter_init(&mp->m_fdblocks, 0, GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1024) if (error)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1025) goto free_ifree;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1026)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1027) error = percpu_counter_init(&mp->m_delalloc_blks, 0, GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1028) if (error)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1029) goto free_fdblocks;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1030)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1031) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1032)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1033) free_fdblocks:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1034) percpu_counter_destroy(&mp->m_fdblocks);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1035) free_ifree:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1036) percpu_counter_destroy(&mp->m_ifree);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1037) free_icount:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1038) percpu_counter_destroy(&mp->m_icount);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1039) return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1040) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1041)
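/*
 * Resynchronize the per-cpu counters with the current on-disk superblock
 * counter values.
 */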
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1042) void
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1043) xfs_reinit_percpu_counters(
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1044) struct xfs_mount *mp)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1045) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1046) percpu_counter_set(&mp->m_icount, mp->m_sb.sb_icount);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1047) percpu_counter_set(&mp->m_ifree, mp->m_sb.sb_ifree);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1048) percpu_counter_set(&mp->m_fdblocks, mp->m_sb.sb_fdblocks);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1049) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1050)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1051) static void
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1052) xfs_destroy_percpu_counters(
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1053) struct xfs_mount *mp)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1054) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1055) percpu_counter_destroy(&mp->m_icount);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1056) percpu_counter_destroy(&mp->m_ifree);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1057) percpu_counter_destroy(&mp->m_fdblocks);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1058) ASSERT(XFS_FORCED_SHUTDOWN(mp) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1059) percpu_counter_sum(&mp->m_delalloc_blks) == 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1060) percpu_counter_destroy(&mp->m_delalloc_blks);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1061) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1062)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1063) static void
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1064) xfs_fs_put_super(
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1065) struct super_block *sb)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1066) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1067) struct xfs_mount *mp = XFS_M(sb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1068)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1069) /* if ->fill_super failed, we have no mount to tear down */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1070) if (!sb->s_fs_info)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1071) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1072)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1073) xfs_notice(mp, "Unmounting Filesystem");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1074) xfs_filestream_unmount(mp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1075) xfs_unmountfs(mp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1076)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1077) xfs_freesb(mp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1078) free_percpu(mp->m_stats.xs_stats);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1079) xfs_destroy_percpu_counters(mp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1080) xfs_destroy_mount_workqueues(mp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1081) xfs_close_devices(mp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1082)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1083) sb->s_fs_info = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1084) xfs_mount_free(mp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1085) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1086)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1087) static long
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1088) xfs_fs_nr_cached_objects(
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1089) struct super_block *sb,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1090) struct shrink_control *sc)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1091) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1092) /* Paranoia: catch incorrect calls during mount setup or teardown */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1093) if (WARN_ON_ONCE(!sb->s_fs_info))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1094) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1095) return xfs_reclaim_inodes_count(XFS_M(sb));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1096) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1097)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1098) static long
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1099) xfs_fs_free_cached_objects(
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1100) struct super_block *sb,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1101) struct shrink_control *sc)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1102) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1103) return xfs_reclaim_inodes_nr(XFS_M(sb), sc->nr_to_scan);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1104) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1105)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1106) static const struct super_operations xfs_super_operations = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1107) .alloc_inode = xfs_fs_alloc_inode,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1108) .destroy_inode = xfs_fs_destroy_inode,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1109) .dirty_inode = xfs_fs_dirty_inode,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1110) .drop_inode = xfs_fs_drop_inode,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1111) .put_super = xfs_fs_put_super,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1112) .sync_fs = xfs_fs_sync_fs,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1113) .freeze_fs = xfs_fs_freeze,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1114) .unfreeze_fs = xfs_fs_unfreeze,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1115) .statfs = xfs_fs_statfs,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1116) .show_options = xfs_fs_show_options,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1117) .nr_cached_objects = xfs_fs_nr_cached_objects,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1118) .free_cached_objects = xfs_fs_free_cached_objects,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1119) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1120)
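/*
 * Parse an integer option value with an optional binary suffix: a trailing
 * K/M/G (upper or lower case) scales the parsed number by 2^10, 2^20 or
 * 2^30, so e.g. "32k" becomes 32768.
 */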
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1121) static int
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1122) suffix_kstrtoint(
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1123) const char *s,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1124) unsigned int base,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1125) int *res)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1126) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1127) int last, shift_left_factor = 0, _res;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1128) char *value;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1129) int ret = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1130)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1131) value = kstrdup(s, GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1132) if (!value)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1133) return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1134)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1135) last = strlen(value) - 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1136) if (value[last] == 'K' || value[last] == 'k') {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1137) shift_left_factor = 10;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1138) value[last] = '\0';
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1139) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1140) if (value[last] == 'M' || value[last] == 'm') {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1141) shift_left_factor = 20;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1142) value[last] = '\0';
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1143) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1144) if (value[last] == 'G' || value[last] == 'g') {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1145) shift_left_factor = 30;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1146) value[last] = '\0';
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1147) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1148)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1149) if (kstrtoint(value, base, &_res))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1150) ret = -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1151) kfree(value);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1152) *res = _res << shift_left_factor;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1153) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1154) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1155)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1156) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1157) * Set mount state from a mount option.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1158) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1159) * NOTE: mp->m_super is NULL here!
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1160) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1161) static int
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1162) xfs_fc_parse_param(
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1163) struct fs_context *fc,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1164) struct fs_parameter *param)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1165) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1166) struct xfs_mount *mp = fc->s_fs_info;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1167) struct fs_parse_result result;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1168) int size = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1169) int opt;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1170)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1171) opt = fs_parse(fc, xfs_fs_parameters, param, &result);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1172) if (opt < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1173) return opt;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1174)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1175) switch (opt) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1176) case Opt_logbufs:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1177) mp->m_logbufs = result.uint_32;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1178) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1179) case Opt_logbsize:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1180) if (suffix_kstrtoint(param->string, 10, &mp->m_logbsize))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1181) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1182) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1183) case Opt_logdev:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1184) kfree(mp->m_logname);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1185) mp->m_logname = kstrdup(param->string, GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1186) if (!mp->m_logname)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1187) return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1188) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1189) case Opt_rtdev:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1190) kfree(mp->m_rtname);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1191) mp->m_rtname = kstrdup(param->string, GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1192) if (!mp->m_rtname)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1193) return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1194) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1195) case Opt_allocsize:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1196) if (suffix_kstrtoint(param->string, 10, &size))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1197) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1198) mp->m_allocsize_log = ffs(size) - 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1199) mp->m_flags |= XFS_MOUNT_ALLOCSIZE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1200) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1201) case Opt_grpid:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1202) case Opt_bsdgroups:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1203) mp->m_flags |= XFS_MOUNT_GRPID;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1204) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1205) case Opt_nogrpid:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1206) case Opt_sysvgroups:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1207) mp->m_flags &= ~XFS_MOUNT_GRPID;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1208) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1209) case Opt_wsync:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1210) mp->m_flags |= XFS_MOUNT_WSYNC;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1211) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1212) case Opt_norecovery:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1213) mp->m_flags |= XFS_MOUNT_NORECOVERY;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1214) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1215) case Opt_noalign:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1216) mp->m_flags |= XFS_MOUNT_NOALIGN;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1217) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1218) case Opt_swalloc:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1219) mp->m_flags |= XFS_MOUNT_SWALLOC;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1220) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1221) case Opt_sunit:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1222) mp->m_dalign = result.uint_32;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1223) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1224) case Opt_swidth:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1225) mp->m_swidth = result.uint_32;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1226) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1227) case Opt_inode32:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1228) mp->m_flags |= XFS_MOUNT_SMALL_INUMS;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1229) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1230) case Opt_inode64:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1231) mp->m_flags &= ~XFS_MOUNT_SMALL_INUMS;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1232) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1233) case Opt_nouuid:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1234) mp->m_flags |= XFS_MOUNT_NOUUID;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1235) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1236) case Opt_largeio:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1237) mp->m_flags |= XFS_MOUNT_LARGEIO;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1238) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1239) case Opt_nolargeio:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1240) mp->m_flags &= ~XFS_MOUNT_LARGEIO;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1241) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1242) case Opt_filestreams:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1243) mp->m_flags |= XFS_MOUNT_FILESTREAMS;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1244) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1245) case Opt_noquota:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1246) mp->m_qflags &= ~XFS_ALL_QUOTA_ACCT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1247) mp->m_qflags &= ~XFS_ALL_QUOTA_ENFD;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1248) mp->m_qflags &= ~XFS_ALL_QUOTA_ACTIVE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1249) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1250) case Opt_quota:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1251) case Opt_uquota:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1252) case Opt_usrquota:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1253) mp->m_qflags |= (XFS_UQUOTA_ACCT | XFS_UQUOTA_ACTIVE |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1254) XFS_UQUOTA_ENFD);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1255) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1256) case Opt_qnoenforce:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1257) case Opt_uqnoenforce:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1258) mp->m_qflags |= (XFS_UQUOTA_ACCT | XFS_UQUOTA_ACTIVE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1259) mp->m_qflags &= ~XFS_UQUOTA_ENFD;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1260) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1261) case Opt_pquota:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1262) case Opt_prjquota:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1263) mp->m_qflags |= (XFS_PQUOTA_ACCT | XFS_PQUOTA_ACTIVE |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1264) XFS_PQUOTA_ENFD);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1265) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1266) case Opt_pqnoenforce:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1267) mp->m_qflags |= (XFS_PQUOTA_ACCT | XFS_PQUOTA_ACTIVE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1268) mp->m_qflags &= ~XFS_PQUOTA_ENFD;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1269) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1270) case Opt_gquota:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1271) case Opt_grpquota:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1272) mp->m_qflags |= (XFS_GQUOTA_ACCT | XFS_GQUOTA_ACTIVE |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1273) XFS_GQUOTA_ENFD);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1274) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1275) case Opt_gqnoenforce:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1276) mp->m_qflags |= (XFS_GQUOTA_ACCT | XFS_GQUOTA_ACTIVE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1277) mp->m_qflags &= ~XFS_GQUOTA_ENFD;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1278) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1279) case Opt_discard:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1280) mp->m_flags |= XFS_MOUNT_DISCARD;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1281) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1282) case Opt_nodiscard:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1283) mp->m_flags &= ~XFS_MOUNT_DISCARD;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1284) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1285) #ifdef CONFIG_FS_DAX
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1286) case Opt_dax:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1287) xfs_mount_set_dax_mode(mp, XFS_DAX_ALWAYS);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1288) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1289) case Opt_dax_enum:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1290) xfs_mount_set_dax_mode(mp, result.uint_32);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1291) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1292) #endif
	/* The following mount options will be removed in September 2025 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1294) case Opt_ikeep:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1295) xfs_warn(mp, "%s mount option is deprecated.", param->key);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1296) mp->m_flags |= XFS_MOUNT_IKEEP;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1297) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1298) case Opt_noikeep:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1299) xfs_warn(mp, "%s mount option is deprecated.", param->key);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1300) mp->m_flags &= ~XFS_MOUNT_IKEEP;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1301) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1302) case Opt_attr2:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1303) xfs_warn(mp, "%s mount option is deprecated.", param->key);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1304) mp->m_flags |= XFS_MOUNT_ATTR2;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1305) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1306) case Opt_noattr2:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1307) xfs_warn(mp, "%s mount option is deprecated.", param->key);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1308) mp->m_flags &= ~XFS_MOUNT_ATTR2;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1309) mp->m_flags |= XFS_MOUNT_NOATTR2;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1310) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1311) default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1312) xfs_warn(mp, "unknown mount option [%s].", param->key);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1313) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1314) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1315)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1316) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1317) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1318)
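/*
 * Cross-check mount options that can only be validated once all parameters
 * have been parsed, e.g. the sunit/swidth pairing and the log buffer
 * geometry limits.
 */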
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1319) static int
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1320) xfs_fc_validate_params(
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1321) struct xfs_mount *mp)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1322) {
	/*
	 * The norecovery option requires a read-only mount.
	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1326) if ((mp->m_flags & XFS_MOUNT_NORECOVERY) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1327) !(mp->m_flags & XFS_MOUNT_RDONLY)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1328) xfs_warn(mp, "no-recovery mounts must be read-only.");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1329) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1330) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1331)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1332) if ((mp->m_flags & XFS_MOUNT_NOALIGN) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1333) (mp->m_dalign || mp->m_swidth)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1334) xfs_warn(mp,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1335) "sunit and swidth options incompatible with the noalign option");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1336) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1337) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1338)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1339) if (!IS_ENABLED(CONFIG_XFS_QUOTA) && mp->m_qflags != 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1340) xfs_warn(mp, "quota support not available in this kernel.");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1341) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1342) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1343)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1344) if ((mp->m_dalign && !mp->m_swidth) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1345) (!mp->m_dalign && mp->m_swidth)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1346) xfs_warn(mp, "sunit and swidth must be specified together");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1347) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1348) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1349)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1350) if (mp->m_dalign && (mp->m_swidth % mp->m_dalign != 0)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1351) xfs_warn(mp,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1352) "stripe width (%d) must be a multiple of the stripe unit (%d)",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1353) mp->m_swidth, mp->m_dalign);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1354) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1355) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1356)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1357) if (mp->m_logbufs != -1 &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1358) mp->m_logbufs != 0 &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1359) (mp->m_logbufs < XLOG_MIN_ICLOGS ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1360) mp->m_logbufs > XLOG_MAX_ICLOGS)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1361) xfs_warn(mp, "invalid logbufs value: %d [not %d-%d]",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1362) mp->m_logbufs, XLOG_MIN_ICLOGS, XLOG_MAX_ICLOGS);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1363) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1364) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1365)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1366) if (mp->m_logbsize != -1 &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1367) mp->m_logbsize != 0 &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1368) (mp->m_logbsize < XLOG_MIN_RECORD_BSIZE ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1369) mp->m_logbsize > XLOG_MAX_RECORD_BSIZE ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1370) !is_power_of_2(mp->m_logbsize))) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1371) xfs_warn(mp,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1372) "invalid logbufsize: %d [not 16k,32k,64k,128k or 256k]",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1373) mp->m_logbsize);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1374) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1375) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1376)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1377) if ((mp->m_flags & XFS_MOUNT_ALLOCSIZE) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1378) (mp->m_allocsize_log > XFS_MAX_IO_LOG ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1379) mp->m_allocsize_log < XFS_MIN_IO_LOG)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1380) xfs_warn(mp, "invalid log iosize: %d [not %d-%d]",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1381) mp->m_allocsize_log, XFS_MIN_IO_LOG, XFS_MAX_IO_LOG);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1382) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1383) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1384)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1385) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1386) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1387)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1388) static int
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1389) xfs_fc_fill_super(
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1390) struct super_block *sb,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1391) struct fs_context *fc)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1392) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1393) struct xfs_mount *mp = sb->s_fs_info;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1394) struct inode *root;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1395) int flags = 0, error;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1396)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1397) mp->m_super = sb;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1398)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1399) error = xfs_fc_validate_params(mp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1400) if (error)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1401) goto out_free_names;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1402)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1403) sb_min_blocksize(sb, BBSIZE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1404) sb->s_xattr = xfs_xattr_handlers;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1405) sb->s_export_op = &xfs_export_operations;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1406) #ifdef CONFIG_XFS_QUOTA
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1407) sb->s_qcop = &xfs_quotactl_operations;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1408) sb->s_quota_types = QTYPE_MASK_USR | QTYPE_MASK_GRP | QTYPE_MASK_PRJ;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1409) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1410) sb->s_op = &xfs_super_operations;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1411)
	/*
	 * Delay mount work if the debug hook is set. This is debug
	 * instrumentation to coordinate simulation of xfs mount failures
	 * with VFS superblock operations.
	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1417) if (xfs_globals.mount_delay) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1418) xfs_notice(mp, "Delaying mount for %d seconds.",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1419) xfs_globals.mount_delay);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1420) msleep(xfs_globals.mount_delay * 1000);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1421) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1422)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1423) if (fc->sb_flags & SB_SILENT)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1424) flags |= XFS_MFSI_QUIET;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1425)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1426) error = xfs_open_devices(mp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1427) if (error)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1428) goto out_free_names;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1429)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1430) error = xfs_init_mount_workqueues(mp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1431) if (error)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1432) goto out_close_devices;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1433)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1434) error = xfs_init_percpu_counters(mp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1435) if (error)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1436) goto out_destroy_workqueues;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1437)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1438) /* Allocate stats memory before we do operations that might use it */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1439) mp->m_stats.xs_stats = alloc_percpu(struct xfsstats);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1440) if (!mp->m_stats.xs_stats) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1441) error = -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1442) goto out_destroy_counters;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1443) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1444)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1445) error = xfs_readsb(mp, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1446) if (error)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1447) goto out_free_stats;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1448)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1449) error = xfs_finish_flags(mp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1450) if (error)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1451) goto out_free_sb;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1452)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1453) error = xfs_setup_devices(mp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1454) if (error)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1455) goto out_free_sb;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1456)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1457) /* V4 support is undergoing deprecation. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1458) if (!xfs_sb_version_hascrc(&mp->m_sb)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1459) #ifdef CONFIG_XFS_SUPPORT_V4
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1460) xfs_warn_once(mp,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1461) "Deprecated V4 format (crc=0) will not be supported after September 2030.");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1462) #else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1463) xfs_warn(mp,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1464) "Deprecated V4 format (crc=0) not supported by kernel.");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1465) error = -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1466) goto out_free_sb;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1467) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1468) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1469)
	/*
	 * XFS block mappings use 54 bits to store the logical block offset.
	 * This should suffice to handle the maximum file size that the VFS
	 * supports (currently 2^63 bytes on 64-bit and ULONG_MAX << PAGE_SHIFT
	 * bytes on 32-bit), but as XFS and VFS have gotten the s_maxbytes
	 * calculation wrong on 32-bit kernels in the past, we check the
	 * constraint here and refuse the mount if it does not hold.
	 *
	 * Avoid integer overflow by comparing the maximum bmbt offset to the
	 * maximum pagecache offset in units of fs blocks.
	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1481) if (XFS_B_TO_FSBT(mp, MAX_LFS_FILESIZE) > XFS_MAX_FILEOFF) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1482) xfs_warn(mp,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1483) "MAX_LFS_FILESIZE block offset (%llu) exceeds extent map maximum (%llu)!",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1484) XFS_B_TO_FSBT(mp, MAX_LFS_FILESIZE),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1485) XFS_MAX_FILEOFF);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1486) error = -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1487) goto out_free_sb;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1488) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1489)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1490) error = xfs_filestream_mount(mp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1491) if (error)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1492) goto out_free_sb;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1493)
	/*
	 * We must configure the block size in the VFS superblock before we
	 * run the full mount process, as the mount process can look up and
	 * cache inodes.
	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1498) sb->s_magic = XFS_SUPER_MAGIC;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1499) sb->s_blocksize = mp->m_sb.sb_blocksize;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1500) sb->s_blocksize_bits = ffs(sb->s_blocksize) - 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1501) sb->s_maxbytes = MAX_LFS_FILESIZE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1502) sb->s_max_links = XFS_MAXLINK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1503) sb->s_time_gran = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1504) if (xfs_sb_version_hasbigtime(&mp->m_sb)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1505) sb->s_time_min = xfs_bigtime_to_unix(XFS_BIGTIME_TIME_MIN);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1506) sb->s_time_max = xfs_bigtime_to_unix(XFS_BIGTIME_TIME_MAX);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1507) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1508) sb->s_time_min = XFS_LEGACY_TIME_MIN;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1509) sb->s_time_max = XFS_LEGACY_TIME_MAX;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1510) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1511) trace_xfs_inode_timestamp_range(mp, sb->s_time_min, sb->s_time_max);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1512) sb->s_iflags |= SB_I_CGROUPWB;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1513)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1514) set_posix_acl_flag(sb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1515)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1516) /* version 5 superblocks support inode version counters. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1517) if (XFS_SB_VERSION_NUM(&mp->m_sb) == XFS_SB_VERSION_5)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1518) sb->s_flags |= SB_I_VERSION;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1519)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1520) if (xfs_sb_version_hasbigtime(&mp->m_sb))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1521) xfs_warn(mp,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1522) "EXPERIMENTAL big timestamp feature in use. Use at your own risk!");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1523)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1524) if (mp->m_flags & XFS_MOUNT_DAX_ALWAYS) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1525) bool rtdev_is_dax = false, datadev_is_dax;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1526)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1527) xfs_warn(mp,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1528) "DAX enabled. Warning: EXPERIMENTAL, use at your own risk");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1529)
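		/*
		 * DAX can only stay enabled if at least one of the data or
		 * realtime devices actually supports it; otherwise fall back
		 * to non-DAX operation.
		 */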
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1530) datadev_is_dax = bdev_dax_supported(mp->m_ddev_targp->bt_bdev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1531) sb->s_blocksize);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1532) if (mp->m_rtdev_targp)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1533) rtdev_is_dax = bdev_dax_supported(
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1534) mp->m_rtdev_targp->bt_bdev, sb->s_blocksize);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1535) if (!rtdev_is_dax && !datadev_is_dax) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1536) xfs_alert(mp,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1537) "DAX unsupported by block device. Turning off DAX.");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1538) xfs_mount_set_dax_mode(mp, XFS_DAX_NEVER);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1539) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1540) if (xfs_sb_version_hasreflink(&mp->m_sb)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1541) xfs_alert(mp,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1542) "DAX and reflink cannot be used together!");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1543) error = -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1544) goto out_filestream_unmount;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1545) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1546) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1547)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1548) if (mp->m_flags & XFS_MOUNT_DISCARD) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1549) struct request_queue *q = bdev_get_queue(sb->s_bdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1550)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1551) if (!blk_queue_discard(q)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1552) xfs_warn(mp, "mounting with \"discard\" option, but "
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1553) "the device does not support discard");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1554) mp->m_flags &= ~XFS_MOUNT_DISCARD;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1555) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1556) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1557)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1558) if (xfs_sb_version_hasreflink(&mp->m_sb)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1559) if (mp->m_sb.sb_rblocks) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1560) xfs_alert(mp,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1561) "reflink not compatible with realtime device!");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1562) error = -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1563) goto out_filestream_unmount;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1564) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1565)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1566) if (xfs_globals.always_cow) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1567) xfs_info(mp, "using DEBUG-only always_cow mode.");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1568) mp->m_always_cow = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1569) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1570) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1571)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1572) if (xfs_sb_version_hasrmapbt(&mp->m_sb) && mp->m_sb.sb_rblocks) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1573) xfs_alert(mp,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1574) "reverse mapping btree not compatible with realtime device!");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1575) error = -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1576) goto out_filestream_unmount;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1577) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1578)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1579) if (xfs_sb_version_hasinobtcounts(&mp->m_sb))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1580) xfs_warn(mp,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1581) "EXPERIMENTAL inode btree counters feature in use. Use at your own risk!");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1582)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1583) error = xfs_mountfs(mp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1584) if (error)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1585) goto out_filestream_unmount;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1586)
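	/*
	 * Grab a reference to the root inode of the freshly mounted
	 * filesystem and hand it to the VFS as the root dentry.
	 */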
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1587) root = igrab(VFS_I(mp->m_rootip));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1588) if (!root) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1589) error = -ENOENT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1590) goto out_unmount;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1591) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1592) sb->s_root = d_make_root(root);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1593) if (!sb->s_root) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1594) error = -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1595) goto out_unmount;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1596) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1597)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1598) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1599)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1600) out_filestream_unmount:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1601) xfs_filestream_unmount(mp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1602) out_free_sb:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1603) xfs_freesb(mp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1604) out_free_stats:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1605) free_percpu(mp->m_stats.xs_stats);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1606) out_destroy_counters:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1607) xfs_destroy_percpu_counters(mp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1608) out_destroy_workqueues:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1609) xfs_destroy_mount_workqueues(mp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1610) out_close_devices:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1611) xfs_close_devices(mp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1612) out_free_names:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1613) sb->s_fs_info = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1614) xfs_mount_free(mp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1615) return error;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1616)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1617) out_unmount:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1618) xfs_filestream_unmount(mp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1619) xfs_unmountfs(mp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1620) goto out_free_sb;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1621) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1622)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1623) static int
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1624) xfs_fc_get_tree(
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1625) struct fs_context *fc)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1626) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1627) return get_tree_bdev(fc, xfs_fc_fill_super);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1628) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1629)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1630) static int
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1631) xfs_remount_rw(
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1632) struct xfs_mount *mp)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1633) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1634) struct xfs_sb *sbp = &mp->m_sb;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1635) int error;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1636)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1637) if (mp->m_flags & XFS_MOUNT_NORECOVERY) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1638) xfs_warn(mp,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1639) "ro->rw transition prohibited on norecovery mount");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1640) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1641) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1642)
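/*
 * Unknown ro-compat feature bits mean the filesystem may be read but
 * must not be written, so refuse the transition to read-write.
 */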
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1643) if (XFS_SB_VERSION_NUM(sbp) == XFS_SB_VERSION_5 &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1644) xfs_sb_has_ro_compat_feature(sbp, XFS_SB_FEAT_RO_COMPAT_UNKNOWN)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1645) xfs_warn(mp,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1646) "ro->rw transition prohibited on unknown (0x%x) ro-compat filesystem",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1647) (sbp->sb_features_ro_compat &
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1648) XFS_SB_FEAT_RO_COMPAT_UNKNOWN));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1649) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1650) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1651)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1652) mp->m_flags &= ~XFS_MOUNT_RDONLY;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1653)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1654) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1655) * If this is the first remount to writeable state we might have some
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1656) * superblock changes to update.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1657) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1658) if (mp->m_update_sb) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1659) error = xfs_sync_sb(mp, false);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1660) if (error) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1661) xfs_warn(mp, "failed to write sb changes");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1662) return error;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1663) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1664) mp->m_update_sb = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1665) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1666)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1667) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1668) * Fill out the reserve pool if it is empty. Use the stashed value if
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1669) * it is non-zero, otherwise go with the default.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1670) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1671) xfs_restore_resvblks(mp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1672) xfs_log_work_queue(mp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1673)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1674) /* Recover any CoW blocks that never got remapped. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1675) error = xfs_reflink_recover_cow(mp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1676) if (error) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1677) xfs_err(mp,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1678) "Error %d recovering leftover CoW allocations.", error);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1679) xfs_force_shutdown(mp, SHUTDOWN_CORRUPT_INCORE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1680) return error;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1681) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1682) xfs_start_block_reaping(mp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1683)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1684) /* Create the per-AG metadata reservation pool. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1685) error = xfs_fs_reserve_ag_blocks(mp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1686) if (error && error != -ENOSPC)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1687) return error;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1688)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1689) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1690) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1691)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1692) static int
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1693) xfs_remount_ro(
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1694) struct xfs_mount *mp)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1695) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1696) int error;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1697)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1698) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1699) * Cancel background eofb scanning so it cannot race with the final
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1700) * log force+buftarg wait and deadlock the remount.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1701) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1702) xfs_stop_block_reaping(mp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1703)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1704) /* Get rid of any leftover CoW reservations... */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1705) error = xfs_icache_free_cowblocks(mp, NULL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1706) if (error) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1707) xfs_force_shutdown(mp, SHUTDOWN_CORRUPT_INCORE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1708) return error;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1709) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1710)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1711) /* Free the per-AG metadata reservation pool. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1712) error = xfs_fs_unreserve_ag_blocks(mp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1713) if (error) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1714) xfs_force_shutdown(mp, SHUTDOWN_CORRUPT_INCORE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1715) return error;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1716) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1717)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1718) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1719) * Before we sync the metadata, we need to free up the reserve block
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1720) * pool so that the used block count in the superblock on disk is
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1721) * correct at the end of the remount. Stash the current reserve pool
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1722) * size so that if we get remounted rw, we can return it to the same
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1723) * size.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1724) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1725) xfs_save_resvblks(mp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1726)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1727) xfs_quiesce_attr(mp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1728) mp->m_flags |= XFS_MOUNT_RDONLY;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1729)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1730) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1731) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1732)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1733) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1734) * Logically we would return an error here to prevent users from believing
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1735) * they have changed mount options via remount that cannot actually be changed.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1736) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1737) * But unfortunately mount(8) adds all options from mtab and fstab to the mount
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1738) * arguments in some cases so we can't blindly reject options, but have to
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1739) * check for each specified option if it actually differs from the currently
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1740) * set option and only reject it if that's the case.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1741) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1742) * Until that is implemented we return success for every remount request, and
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1743) * silently ignore all options that we can't actually change.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1744) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1745) static int
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1746) xfs_fc_reconfigure(
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1747) struct fs_context *fc)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1748) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1749) struct xfs_mount *mp = XFS_M(fc->root->d_sb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1750) struct xfs_mount *new_mp = fc->s_fs_info;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1751) xfs_sb_t *sbp = &mp->m_sb;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1752) int flags = fc->sb_flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1753) int error;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1754)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1755) /* version 5 superblocks always support version counters. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1756) if (XFS_SB_VERSION_NUM(&mp->m_sb) == XFS_SB_VERSION_5)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1757) fc->sb_flags |= SB_I_VERSION;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1758)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1759) error = xfs_fc_validate_params(new_mp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1760) if (error)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1761) return error;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1762)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1763) sync_filesystem(mp->m_super);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1764)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1765) /* inode32 -> inode64 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1766) if ((mp->m_flags & XFS_MOUNT_SMALL_INUMS) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1767) !(new_mp->m_flags & XFS_MOUNT_SMALL_INUMS)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1768) mp->m_flags &= ~XFS_MOUNT_SMALL_INUMS;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1769) mp->m_maxagi = xfs_set_inode_alloc(mp, sbp->sb_agcount);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1770) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1771)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1772) /* inode64 -> inode32 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1773) if (!(mp->m_flags & XFS_MOUNT_SMALL_INUMS) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1774) (new_mp->m_flags & XFS_MOUNT_SMALL_INUMS)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1775) mp->m_flags |= XFS_MOUNT_SMALL_INUMS;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1776) mp->m_maxagi = xfs_set_inode_alloc(mp, sbp->sb_agcount);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1777) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1778)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1779) /* ro -> rw */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1780) if ((mp->m_flags & XFS_MOUNT_RDONLY) && !(flags & SB_RDONLY)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1781) error = xfs_remount_rw(mp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1782) if (error)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1783) return error;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1784) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1785)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1786) /* rw -> ro */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1787) if (!(mp->m_flags & XFS_MOUNT_RDONLY) && (flags & SB_RDONLY)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1788) error = xfs_remount_ro(mp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1789) if (error)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1790) return error;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1791) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1792)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1793) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1794) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1795)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1796) static void xfs_fc_free(
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1797) struct fs_context *fc)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1798) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1799) struct xfs_mount *mp = fc->s_fs_info;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1800)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1801) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1802) * mp is stored in the fs_context when it is initialized.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1803) * mp is transferred to the superblock on a successful mount,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1804) * but if an error occurs before the transfer we have to free
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1805) * it here.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1806) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1807) if (mp)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1808) xfs_mount_free(mp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1809) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1810)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1811) static const struct fs_context_operations xfs_context_ops = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1812) .parse_param = xfs_fc_parse_param,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1813) .get_tree = xfs_fc_get_tree,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1814) .reconfigure = xfs_fc_reconfigure,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1815) .free = xfs_fc_free,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1816) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1817)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1818) static int xfs_init_fs_context(
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1819) struct fs_context *fc)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1820) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1821) struct xfs_mount *mp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1822)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1823) mp = kmem_alloc(sizeof(struct xfs_mount), KM_ZERO);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1824) if (!mp)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1825) return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1826)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1827) spin_lock_init(&mp->m_sb_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1828) spin_lock_init(&mp->m_agirotor_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1829) INIT_RADIX_TREE(&mp->m_perag_tree, GFP_ATOMIC);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1830) spin_lock_init(&mp->m_perag_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1831) mutex_init(&mp->m_growlock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1832) INIT_WORK(&mp->m_flush_inodes_work, xfs_flush_inodes_worker);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1833) INIT_DELAYED_WORK(&mp->m_reclaim_work, xfs_reclaim_worker);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1834) INIT_DELAYED_WORK(&mp->m_eofblocks_work, xfs_eofblocks_worker);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1835) INIT_DELAYED_WORK(&mp->m_cowblocks_work, xfs_cowblocks_worker);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1836) mp->m_kobj.kobject.kset = xfs_kset;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1837) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1838) * We don't create the finobt per-ag space reservation until after log
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1839) * recovery, so we must set this to true so that an ifree transaction
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1840) * started during log recovery will not depend on space reservations
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1841) * for finobt expansion.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1842) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1843) mp->m_finobt_nores = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1844)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1845) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1846) * These can be overridden by the mount option parsing.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1847) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1848) mp->m_logbufs = -1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1849) mp->m_logbsize = -1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1850) mp->m_allocsize_log = 16; /* 64k */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1851)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1852) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1853) * Copy binary VFS mount flags we are interested in.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1854) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1855) if (fc->sb_flags & SB_RDONLY)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1856) mp->m_flags |= XFS_MOUNT_RDONLY;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1857) if (fc->sb_flags & SB_DIRSYNC)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1858) mp->m_flags |= XFS_MOUNT_DIRSYNC;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1859) if (fc->sb_flags & SB_SYNCHRONOUS)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1860) mp->m_flags |= XFS_MOUNT_WSYNC;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1861)
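/* Hand the new mount over to the fs_context and install the XFS context ops. */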
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1862) fc->s_fs_info = mp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1863) fc->ops = &xfs_context_ops;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1864)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1865) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1866) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1867)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1868) static struct file_system_type xfs_fs_type = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1869) .owner = THIS_MODULE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1870) .name = "xfs",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1871) .init_fs_context = xfs_init_fs_context,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1872) .parameters = xfs_fs_parameters,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1873) .kill_sb = kill_block_super,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1874) .fs_flags = FS_REQUIRES_DEV,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1875) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1876) MODULE_ALIAS_FS("xfs");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1877)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1878) STATIC int __init
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1879) xfs_init_zones(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1880) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1881) xfs_log_ticket_zone = kmem_cache_create("xfs_log_ticket",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1882) sizeof(struct xlog_ticket),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1883) 0, 0, NULL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1884) if (!xfs_log_ticket_zone)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1885) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1886)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1887) xfs_bmap_free_item_zone = kmem_cache_create("xfs_bmap_free_item",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1888) sizeof(struct xfs_extent_free_item),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1889) 0, 0, NULL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1890) if (!xfs_bmap_free_item_zone)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1891) goto out_destroy_log_ticket_zone;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1892)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1893) xfs_btree_cur_zone = kmem_cache_create("xfs_btree_cur",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1894) sizeof(struct xfs_btree_cur),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1895) 0, 0, NULL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1896) if (!xfs_btree_cur_zone)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1897) goto out_destroy_bmap_free_item_zone;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1898)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1899) xfs_da_state_zone = kmem_cache_create("xfs_da_state",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1900) sizeof(struct xfs_da_state),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1901) 0, 0, NULL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1902) if (!xfs_da_state_zone)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1903) goto out_destroy_btree_cur_zone;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1904)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1905) xfs_ifork_zone = kmem_cache_create("xfs_ifork",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1906) sizeof(struct xfs_ifork),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1907) 0, 0, NULL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1908) if (!xfs_ifork_zone)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1909) goto out_destroy_da_state_zone;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1910)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1911) xfs_trans_zone = kmem_cache_create("xfs_trans",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1912) sizeof(struct xfs_trans),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1913) 0, 0, NULL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1914) if (!xfs_trans_zone)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1915) goto out_destroy_ifork_zone;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1916)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1917)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1918) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1919) * The size of the zone allocated buf log item is the maximum
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1920) * size possible under XFS. This wastes a little bit of memory,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1921) * but it is much faster.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1922) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1923) xfs_buf_item_zone = kmem_cache_create("xfs_buf_item",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1924) sizeof(struct xfs_buf_log_item),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1925) 0, 0, NULL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1926) if (!xfs_buf_item_zone)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1927) goto out_destroy_trans_zone;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1928)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1929) xfs_efd_zone = kmem_cache_create("xfs_efd_item",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1930) (sizeof(struct xfs_efd_log_item) +
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1931) (XFS_EFD_MAX_FAST_EXTENTS - 1) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1932) sizeof(struct xfs_extent)),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1933) 0, 0, NULL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1934) if (!xfs_efd_zone)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1935) goto out_destroy_buf_item_zone;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1936)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1937) xfs_efi_zone = kmem_cache_create("xfs_efi_item",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1938) (sizeof(struct xfs_efi_log_item) +
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1939) (XFS_EFI_MAX_FAST_EXTENTS - 1) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1940) sizeof(struct xfs_extent)),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1941) 0, 0, NULL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1942) if (!xfs_efi_zone)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1943) goto out_destroy_efd_zone;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1944)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1945) xfs_inode_zone = kmem_cache_create("xfs_inode",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1946) sizeof(struct xfs_inode), 0,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1947) (SLAB_HWCACHE_ALIGN |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1948) SLAB_RECLAIM_ACCOUNT |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1949) SLAB_MEM_SPREAD | SLAB_ACCOUNT),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1950) xfs_fs_inode_init_once);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1951) if (!xfs_inode_zone)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1952) goto out_destroy_efi_zone;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1953)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1954) xfs_ili_zone = kmem_cache_create("xfs_ili",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1955) sizeof(struct xfs_inode_log_item), 0,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1956) SLAB_RECLAIM_ACCOUNT | SLAB_MEM_SPREAD,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1957) NULL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1958) if (!xfs_ili_zone)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1959) goto out_destroy_inode_zone;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1960)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1961) xfs_icreate_zone = kmem_cache_create("xfs_icr",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1962) sizeof(struct xfs_icreate_item),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1963) 0, 0, NULL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1964) if (!xfs_icreate_zone)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1965) goto out_destroy_ili_zone;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1966)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1967) xfs_rud_zone = kmem_cache_create("xfs_rud_item",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1968) sizeof(struct xfs_rud_log_item),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1969) 0, 0, NULL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1970) if (!xfs_rud_zone)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1971) goto out_destroy_icreate_zone;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1972)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1973) xfs_rui_zone = kmem_cache_create("xfs_rui_item",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1974) xfs_rui_log_item_sizeof(XFS_RUI_MAX_FAST_EXTENTS),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1975) 0, 0, NULL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1976) if (!xfs_rui_zone)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1977) goto out_destroy_rud_zone;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1978)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1979) xfs_cud_zone = kmem_cache_create("xfs_cud_item",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1980) sizeof(struct xfs_cud_log_item),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1981) 0, 0, NULL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1982) if (!xfs_cud_zone)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1983) goto out_destroy_rui_zone;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1984)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1985) xfs_cui_zone = kmem_cache_create("xfs_cui_item",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1986) xfs_cui_log_item_sizeof(XFS_CUI_MAX_FAST_EXTENTS),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1987) 0, 0, NULL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1988) if (!xfs_cui_zone)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1989) goto out_destroy_cud_zone;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1990)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1991) xfs_bud_zone = kmem_cache_create("xfs_bud_item",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1992) sizeof(struct xfs_bud_log_item),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1993) 0, 0, NULL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1994) if (!xfs_bud_zone)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1995) goto out_destroy_cui_zone;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1996)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1997) xfs_bui_zone = kmem_cache_create("xfs_bui_item",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1998) xfs_bui_log_item_sizeof(XFS_BUI_MAX_FAST_EXTENTS),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1999) 0, 0, NULL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2000) if (!xfs_bui_zone)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2001) goto out_destroy_bud_zone;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2002)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2003) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2004)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2005) out_destroy_bud_zone:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2006) kmem_cache_destroy(xfs_bud_zone);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2007) out_destroy_cui_zone:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2008) kmem_cache_destroy(xfs_cui_zone);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2009) out_destroy_cud_zone:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2010) kmem_cache_destroy(xfs_cud_zone);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2011) out_destroy_rui_zone:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2012) kmem_cache_destroy(xfs_rui_zone);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2013) out_destroy_rud_zone:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2014) kmem_cache_destroy(xfs_rud_zone);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2015) out_destroy_icreate_zone:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2016) kmem_cache_destroy(xfs_icreate_zone);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2017) out_destroy_ili_zone:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2018) kmem_cache_destroy(xfs_ili_zone);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2019) out_destroy_inode_zone:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2020) kmem_cache_destroy(xfs_inode_zone);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2021) out_destroy_efi_zone:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2022) kmem_cache_destroy(xfs_efi_zone);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2023) out_destroy_efd_zone:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2024) kmem_cache_destroy(xfs_efd_zone);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2025) out_destroy_buf_item_zone:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2026) kmem_cache_destroy(xfs_buf_item_zone);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2027) out_destroy_trans_zone:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2028) kmem_cache_destroy(xfs_trans_zone);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2029) out_destroy_ifork_zone:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2030) kmem_cache_destroy(xfs_ifork_zone);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2031) out_destroy_da_state_zone:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2032) kmem_cache_destroy(xfs_da_state_zone);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2033) out_destroy_btree_cur_zone:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2034) kmem_cache_destroy(xfs_btree_cur_zone);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2035) out_destroy_bmap_free_item_zone:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2036) kmem_cache_destroy(xfs_bmap_free_item_zone);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2037) out_destroy_log_ticket_zone:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2038) kmem_cache_destroy(xfs_log_ticket_zone);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2039) out:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2040) return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2041) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2042)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2043) STATIC void
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2044) xfs_destroy_zones(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2045) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2046) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2047) * Make sure all delayed RCU frees are flushed before we
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2048) * destroy caches.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2049) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2050) rcu_barrier();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2051) kmem_cache_destroy(xfs_bui_zone);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2052) kmem_cache_destroy(xfs_bud_zone);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2053) kmem_cache_destroy(xfs_cui_zone);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2054) kmem_cache_destroy(xfs_cud_zone);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2055) kmem_cache_destroy(xfs_rui_zone);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2056) kmem_cache_destroy(xfs_rud_zone);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2057) kmem_cache_destroy(xfs_icreate_zone);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2058) kmem_cache_destroy(xfs_ili_zone);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2059) kmem_cache_destroy(xfs_inode_zone);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2060) kmem_cache_destroy(xfs_efi_zone);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2061) kmem_cache_destroy(xfs_efd_zone);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2062) kmem_cache_destroy(xfs_buf_item_zone);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2063) kmem_cache_destroy(xfs_trans_zone);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2064) kmem_cache_destroy(xfs_ifork_zone);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2065) kmem_cache_destroy(xfs_da_state_zone);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2066) kmem_cache_destroy(xfs_btree_cur_zone);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2067) kmem_cache_destroy(xfs_bmap_free_item_zone);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2068) kmem_cache_destroy(xfs_log_ticket_zone);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2069) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2070)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2071) STATIC int __init
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2072) xfs_init_workqueues(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2073) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2074) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2075) * The allocation workqueue can be used in memory reclaim situations
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2076) * (writepage path), and parallelism is only limited by the number of
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2077) * AGs in all the filesystems mounted. Hence use the default large
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2078) * max_active value for this workqueue.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2079) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2080) xfs_alloc_wq = alloc_workqueue("xfsalloc",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2081) WQ_MEM_RECLAIM|WQ_FREEZABLE, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2082) if (!xfs_alloc_wq)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2083) return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2084)
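/* Unbound workqueue used to issue discard requests in the background. */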
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2085) xfs_discard_wq = alloc_workqueue("xfsdiscard", WQ_UNBOUND, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2086) if (!xfs_discard_wq)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2087) goto out_free_alloc_wq;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2088)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2089) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2090) out_free_alloc_wq:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2091) destroy_workqueue(xfs_alloc_wq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2092) return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2093) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2094)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2095) STATIC void
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2096) xfs_destroy_workqueues(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2097) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2098) destroy_workqueue(xfs_discard_wq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2099) destroy_workqueue(xfs_alloc_wq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2100) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2101)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2102) STATIC int __init
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2103) init_xfs_fs(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2104) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2105) int error;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2106)
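/* Compile-time checks that on-disk structures have the expected sizes. */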
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2107) xfs_check_ondisk_structs();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2108)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2109) printk(KERN_INFO XFS_VERSION_STRING " with "
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2110) XFS_BUILD_OPTIONS " enabled\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2111)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2112) xfs_dir_startup();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2113)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2114) error = xfs_init_zones();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2115) if (error)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2116) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2117)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2118) error = xfs_init_workqueues();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2119) if (error)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2120) goto out_destroy_zones;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2121)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2122) error = xfs_mru_cache_init();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2123) if (error)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2124) goto out_destroy_wq;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2125)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2126) error = xfs_buf_init();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2127) if (error)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2128) goto out_mru_cache_uninit;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2129)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2130) error = xfs_init_procfs();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2131) if (error)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2132) goto out_buf_terminate;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2133)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2134) error = xfs_sysctl_register();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2135) if (error)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2136) goto out_cleanup_procfs;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2137)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2138) xfs_kset = kset_create_and_add("xfs", NULL, fs_kobj);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2139) if (!xfs_kset) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2140) error = -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2141) goto out_sysctl_unregister;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2142) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2143)
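/* Set up the global per-cpu statistics and expose them via sysfs. */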
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2144) xfsstats.xs_kobj.kobject.kset = xfs_kset;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2145)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2146) xfsstats.xs_stats = alloc_percpu(struct xfsstats);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2147) if (!xfsstats.xs_stats) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2148) error = -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2149) goto out_kset_unregister;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2150) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2151)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2152) error = xfs_sysfs_init(&xfsstats.xs_kobj, &xfs_stats_ktype, NULL,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2153) "stats");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2154) if (error)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2155) goto out_free_stats;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2156)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2157) #ifdef DEBUG
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2158) xfs_dbg_kobj.kobject.kset = xfs_kset;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2159) error = xfs_sysfs_init(&xfs_dbg_kobj, &xfs_dbg_ktype, NULL, "debug");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2160) if (error)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2161) goto out_remove_stats_kobj;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2162) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2163)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2164) error = xfs_qm_init();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2165) if (error)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2166) goto out_remove_dbg_kobj;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2167)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2168) error = register_filesystem(&xfs_fs_type);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2169) if (error)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2170) goto out_qm_exit;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2171) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2172)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2173) out_qm_exit:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2174) xfs_qm_exit();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2175) out_remove_dbg_kobj:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2176) #ifdef DEBUG
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2177) xfs_sysfs_del(&xfs_dbg_kobj);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2178) out_remove_stats_kobj:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2179) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2180) xfs_sysfs_del(&xfsstats.xs_kobj);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2181) out_free_stats:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2182) free_percpu(xfsstats.xs_stats);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2183) out_kset_unregister:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2184) kset_unregister(xfs_kset);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2185) out_sysctl_unregister:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2186) xfs_sysctl_unregister();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2187) out_cleanup_procfs:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2188) xfs_cleanup_procfs();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2189) out_buf_terminate:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2190) xfs_buf_terminate();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2191) out_mru_cache_uninit:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2192) xfs_mru_cache_uninit();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2193) out_destroy_wq:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2194) xfs_destroy_workqueues();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2195) out_destroy_zones:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2196) xfs_destroy_zones();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2197) out:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2198) return error;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2199) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2200)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2201) STATIC void __exit
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2202) exit_xfs_fs(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2203) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2204) xfs_qm_exit();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2205) unregister_filesystem(&xfs_fs_type);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2206) #ifdef DEBUG
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2207) xfs_sysfs_del(&xfs_dbg_kobj);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2208) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2209) xfs_sysfs_del(&xfsstats.xs_kobj);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2210) free_percpu(xfsstats.xs_stats);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2211) kset_unregister(xfs_kset);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2212) xfs_sysctl_unregister();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2213) xfs_cleanup_procfs();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2214) xfs_buf_terminate();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2215) xfs_mru_cache_uninit();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2216) xfs_destroy_workqueues();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2217) xfs_destroy_zones();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2218) xfs_uuid_table_free();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2219) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2220)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2221) module_init(init_xfs_fs);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2222) module_exit(exit_xfs_fs);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2223)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2224) MODULE_AUTHOR("Silicon Graphics, Inc.");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2225) MODULE_DESCRIPTION(XFS_VERSION_STRING " with " XFS_BUILD_OPTIONS " enabled");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2226) MODULE_LICENSE("GPL");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2227) MODULE_IMPORT_NS(ANDROID_GKI_VFS_EXPORT_ONLY);