// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (c) 2000-2005 Silicon Graphics, Inc.
 * All Rights Reserved.
 */
#include "xfs.h"
#include "xfs_fs.h"
#include "xfs_shared.h"
#include "xfs_format.h"
#include "xfs_log_format.h"
#include "xfs_trans_resv.h"
#include "xfs_bit.h"
#include "xfs_sb.h"
#include "xfs_mount.h"
#include "xfs_inode.h"
#include "xfs_dir2.h"
#include "xfs_ialloc.h"
#include "xfs_alloc.h"
#include "xfs_rtalloc.h"
#include "xfs_bmap.h"
#include "xfs_trans.h"
#include "xfs_trans_priv.h"
#include "xfs_log.h"
#include "xfs_error.h"
#include "xfs_quota.h"
#include "xfs_fsops.h"
#include "xfs_icache.h"
#include "xfs_sysfs.h"
#include "xfs_rmap_btree.h"
#include "xfs_refcount_btree.h"
#include "xfs_reflink.h"
#include "xfs_extent_busy.h"
#include "xfs_health.h"
#include "xfs_trace.h"

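/*
 * Table of the superblock UUIDs of all currently mounted filesystems, used to
 * detect duplicate UUIDs at mount time. Entries are added by xfs_uuid_mount()
 * and cleared by xfs_uuid_unmount(); holes left behind by unmounts are reused
 * by later mounts.
 */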
static DEFINE_MUTEX(xfs_uuid_table_mutex);
static int xfs_uuid_table_size;
static uuid_t *xfs_uuid_table;

void
xfs_uuid_table_free(void)
{
	if (xfs_uuid_table_size == 0)
		return;
	kmem_free(xfs_uuid_table);
	xfs_uuid_table = NULL;
	xfs_uuid_table_size = 0;
}

/*
 * See if the UUID is unique among mounted XFS filesystems.
 * Mount fails if UUID is nil or a FS with the same UUID is already mounted.
 */
STATIC int
xfs_uuid_mount(
	struct xfs_mount	*mp)
{
	uuid_t			*uuid = &mp->m_sb.sb_uuid;
	int			hole, i;

	/* Publish UUID in struct super_block */
	uuid_copy(&mp->m_super->s_uuid, uuid);

	if (mp->m_flags & XFS_MOUNT_NOUUID)
		return 0;

	if (uuid_is_null(uuid)) {
		xfs_warn(mp, "Filesystem has null UUID - can't mount");
		return -EINVAL;
	}

	mutex_lock(&xfs_uuid_table_mutex);
	for (i = 0, hole = -1; i < xfs_uuid_table_size; i++) {
		if (uuid_is_null(&xfs_uuid_table[i])) {
			hole = i;
			continue;
		}
		if (uuid_equal(uuid, &xfs_uuid_table[i]))
			goto out_duplicate;
	}

	if (hole < 0) {
		xfs_uuid_table = krealloc(xfs_uuid_table,
			(xfs_uuid_table_size + 1) * sizeof(*xfs_uuid_table),
			GFP_KERNEL | __GFP_NOFAIL);
		hole = xfs_uuid_table_size++;
	}
	xfs_uuid_table[hole] = *uuid;
	mutex_unlock(&xfs_uuid_table_mutex);

	return 0;

 out_duplicate:
	mutex_unlock(&xfs_uuid_table_mutex);
	xfs_warn(mp, "Filesystem has duplicate UUID %pU - can't mount", uuid);
	return -EINVAL;
}

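/*
 * Clear this filesystem's UUID from the table on unmount so that the same
 * UUID can be mounted again later.
 */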
STATIC void
xfs_uuid_unmount(
	struct xfs_mount	*mp)
{
	uuid_t			*uuid = &mp->m_sb.sb_uuid;
	int			i;

	if (mp->m_flags & XFS_MOUNT_NOUUID)
		return;

	mutex_lock(&xfs_uuid_table_mutex);
	for (i = 0; i < xfs_uuid_table_size; i++) {
		if (uuid_is_null(&xfs_uuid_table[i]))
			continue;
		if (!uuid_equal(uuid, &xfs_uuid_table[i]))
			continue;
		memset(&xfs_uuid_table[i], 0, sizeof(uuid_t));
		break;
	}
	ASSERT(i < xfs_uuid_table_size);
	mutex_unlock(&xfs_uuid_table_mutex);
}


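/*
 * RCU callback that actually frees a perag structure, once the last
 * reference has been dropped and any in-flight lockless lookups have
 * completed their grace period.
 */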
STATIC void
__xfs_free_perag(
	struct rcu_head	*head)
{
	struct xfs_perag *pag = container_of(head, struct xfs_perag, rcu_head);

	ASSERT(atomic_read(&pag->pag_ref) == 0);
	kmem_free(pag);
}

/*
 * Free up the per-ag resources associated with the mount structure.
 */
STATIC void
xfs_free_perag(
	xfs_mount_t	*mp)
{
	xfs_agnumber_t	agno;
	struct xfs_perag *pag;

	for (agno = 0; agno < mp->m_sb.sb_agcount; agno++) {
		spin_lock(&mp->m_perag_lock);
		pag = radix_tree_delete(&mp->m_perag_tree, agno);
		spin_unlock(&mp->m_perag_lock);
		ASSERT(pag);
		ASSERT(atomic_read(&pag->pag_ref) == 0);
		xfs_iunlink_destroy(pag);
		xfs_buf_hash_destroy(pag);
		call_rcu(&pag->rcu_head, __xfs_free_perag);
	}
}

/*
 * Check size of device based on the (data/realtime) block count.
 * Note: this check is used by the growfs code as well as mount.
 */
int
xfs_sb_validate_fsb_count(
	xfs_sb_t	*sbp,
	uint64_t	nblocks)
{
	ASSERT(PAGE_SHIFT >= sbp->sb_blocklog);
	ASSERT(sbp->sb_blocklog >= BBSHIFT);

	/* Limited by ULONG_MAX of page cache index */
	if (nblocks >> (PAGE_SHIFT - sbp->sb_blocklog) > ULONG_MAX)
		return -EFBIG;
	return 0;
}

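/*
 * Allocate and initialise per-AG structures for AGs 0 to agcount - 1 and
 * insert them into the mount's radix tree, skipping any AGs that already
 * exist (the growfs case). On success, *maxagi is set to the inode
 * allocation limit computed by xfs_set_inode_alloc().
 */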
int
xfs_initialize_perag(
	xfs_mount_t	*mp,
	xfs_agnumber_t	agcount,
	xfs_agnumber_t	*maxagi)
{
	xfs_agnumber_t	index;
	xfs_agnumber_t	first_initialised = NULLAGNUMBER;
	xfs_perag_t	*pag;
	int		error = -ENOMEM;

	/*
	 * Walk the current per-ag tree so we don't try to initialise AGs
	 * that already exist (growfs case). Allocate and insert all the
	 * AGs we don't find ready for initialisation.
	 */
	for (index = 0; index < agcount; index++) {
		pag = xfs_perag_get(mp, index);
		if (pag) {
			xfs_perag_put(pag);
			continue;
		}

		pag = kmem_zalloc(sizeof(*pag), KM_MAYFAIL);
		if (!pag) {
			error = -ENOMEM;
			goto out_unwind_new_pags;
		}
		pag->pag_agno = index;
		pag->pag_mount = mp;
		spin_lock_init(&pag->pag_ici_lock);
		INIT_RADIX_TREE(&pag->pag_ici_root, GFP_ATOMIC);

		error = xfs_buf_hash_init(pag);
		if (error)
			goto out_free_pag;
		init_waitqueue_head(&pag->pagb_wait);
		spin_lock_init(&pag->pagb_lock);
		pag->pagb_count = 0;
		pag->pagb_tree = RB_ROOT;

		error = radix_tree_preload(GFP_NOFS);
		if (error)
			goto out_hash_destroy;

		spin_lock(&mp->m_perag_lock);
		if (radix_tree_insert(&mp->m_perag_tree, index, pag)) {
			WARN_ON_ONCE(1);
			spin_unlock(&mp->m_perag_lock);
			radix_tree_preload_end();
			error = -EEXIST;
			goto out_hash_destroy;
		}
		spin_unlock(&mp->m_perag_lock);
		radix_tree_preload_end();
		/* first new pag is fully initialized */
		if (first_initialised == NULLAGNUMBER)
			first_initialised = index;
		error = xfs_iunlink_init(pag);
		if (error)
			goto out_hash_destroy;
		spin_lock_init(&pag->pag_state_lock);
	}

	index = xfs_set_inode_alloc(mp, agcount);

	if (maxagi)
		*maxagi = index;

	mp->m_ag_prealloc_blocks = xfs_prealloc_blocks(mp);
	return 0;

out_hash_destroy:
	xfs_buf_hash_destroy(pag);
out_free_pag:
	kmem_free(pag);
out_unwind_new_pags:
	/* unwind any prior newly initialized pags */
	for (index = first_initialised; index < agcount; index++) {
		pag = radix_tree_delete(&mp->m_perag_tree, index);
		if (!pag)
			break;
		xfs_buf_hash_destroy(pag);
		xfs_iunlink_destroy(pag);
		kmem_free(pag);
	}
	return error;
}

/*
 * xfs_readsb
 *
 * Does the initial read of the superblock.
 */
int
xfs_readsb(
	struct xfs_mount *mp,
	int		flags)
{
	unsigned int	sector_size;
	struct xfs_buf	*bp;
	struct xfs_sb	*sbp = &mp->m_sb;
	int		error;
	int		loud = !(flags & XFS_MFSI_QUIET);
	const struct xfs_buf_ops *buf_ops;

	ASSERT(mp->m_sb_bp == NULL);
	ASSERT(mp->m_ddev_targp != NULL);

	/*
	 * For the initial read, we must guess at the sector
	 * size based on the block device. It's enough to
	 * get the sb_sectsize out of the superblock and
	 * then reread with the proper length.
	 * We don't verify it yet, because it may not be complete.
	 */
	sector_size = xfs_getsize_buftarg(mp->m_ddev_targp);
	buf_ops = NULL;

	/*
	 * Allocate a (locked) buffer to hold the superblock. This will be kept
	 * around at all times to optimize access to the superblock. Therefore,
	 * set XBF_NO_IOACCT to make sure it doesn't hold the buftarg count
	 * elevated.
	 */
reread:
	error = xfs_buf_read_uncached(mp->m_ddev_targp, XFS_SB_DADDR,
				      BTOBB(sector_size), XBF_NO_IOACCT, &bp,
				      buf_ops);
	if (error) {
		if (loud)
			xfs_warn(mp, "SB validate failed with error %d.", error);
		/* bad CRC means corrupted metadata */
		if (error == -EFSBADCRC)
			error = -EFSCORRUPTED;
		return error;
	}

	/*
	 * Initialize the mount structure from the superblock.
	 */
	xfs_sb_from_disk(sbp, bp->b_addr);

	/*
	 * If we haven't validated the superblock, do so now before we try
	 * to check the sector size and reread the superblock appropriately.
	 */
	if (sbp->sb_magicnum != XFS_SB_MAGIC) {
		if (loud)
			xfs_warn(mp, "Invalid superblock magic number");
		error = -EINVAL;
		goto release_buf;
	}

	/*
	 * We must be able to do sector-sized and sector-aligned IO.
	 */
	if (sector_size > sbp->sb_sectsize) {
		if (loud)
			xfs_warn(mp, "device supports %u byte sectors (not %u)",
				sector_size, sbp->sb_sectsize);
		error = -ENOSYS;
		goto release_buf;
	}

	if (buf_ops == NULL) {
		/*
		 * Re-read the superblock so the buffer is correctly sized,
		 * and properly verified.
		 */
		xfs_buf_relse(bp);
		sector_size = sbp->sb_sectsize;
		buf_ops = loud ? &xfs_sb_buf_ops : &xfs_sb_quiet_buf_ops;
		goto reread;
	}

	xfs_reinit_percpu_counters(mp);

	/* no need to be quiet anymore, so reset the buf ops */
	bp->b_ops = &xfs_sb_buf_ops;

	mp->m_sb_bp = bp;
	xfs_buf_unlock(bp);
	return 0;

release_buf:
	xfs_buf_relse(bp);
	return error;
}

/*
 * If the sunit/swidth change would move the precomputed root inode value, we
 * must reject the ondisk change because repair will stumble over that.
 * However, we allow the mount to proceed because we never rejected this
 * combination before. Sets *update_sb to true if the superblock should be
 * updated with the new values, false otherwise.
 */
static inline int
xfs_check_new_dalign(
	struct xfs_mount	*mp,
	int			new_dalign,
	bool			*update_sb)
{
	struct xfs_sb		*sbp = &mp->m_sb;
	xfs_ino_t		calc_ino;

	calc_ino = xfs_ialloc_calc_rootino(mp, new_dalign);
	trace_xfs_check_new_dalign(mp, new_dalign, calc_ino);

	if (sbp->sb_rootino == calc_ino) {
		*update_sb = true;
		return 0;
	}

	xfs_warn(mp,
"Cannot change stripe alignment; would require moving root inode.");

	/*
	 * XXX: Next time we add a new incompat feature, this should start
	 * returning -EINVAL to fail the mount. Until then, spit out a warning
	 * that we're ignoring the administrator's instructions.
	 */
	xfs_warn(mp, "Skipping superblock stripe alignment update.");
	*update_sb = false;
	return 0;
}

/*
 * If we were provided with new sunit/swidth values as mount options, make sure
 * that they pass basic alignment and superblock feature checks, and convert
 * them into the same units (FSB) that everything else expects. This step
 * /must/ be done before computing the inode geometry.
 */
STATIC int
xfs_validate_new_dalign(
	struct xfs_mount	*mp)
{
	if (mp->m_dalign == 0)
		return 0;

	/*
	 * If stripe unit and stripe width are not multiples
	 * of the fs blocksize turn off alignment.
	 */
	if ((BBTOB(mp->m_dalign) & mp->m_blockmask) ||
	    (BBTOB(mp->m_swidth) & mp->m_blockmask)) {
		xfs_warn(mp,
	"alignment check failed: sunit/swidth vs. blocksize(%d)",
			mp->m_sb.sb_blocksize);
		return -EINVAL;
	} else {
		/*
		 * Convert the stripe unit and width to FSBs.
		 */
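		/*
		 * For example (illustrative values): sunit=128 basic
		 * (512-byte) blocks on a filesystem with 4096-byte blocks
		 * converts to 128 * 512 / 4096 = 16 FSBs.
		 */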
		mp->m_dalign = XFS_BB_TO_FSBT(mp, mp->m_dalign);
		if (mp->m_dalign && (mp->m_sb.sb_agblocks % mp->m_dalign)) {
			xfs_warn(mp,
		"alignment check failed: sunit/swidth vs. agsize(%d)",
				mp->m_sb.sb_agblocks);
			return -EINVAL;
		} else if (mp->m_dalign) {
			mp->m_swidth = XFS_BB_TO_FSBT(mp, mp->m_swidth);
		} else {
			xfs_warn(mp,
		"alignment check failed: sunit(%d) less than bsize(%d)",
				mp->m_dalign, mp->m_sb.sb_blocksize);
			return -EINVAL;
		}
	}

	if (!xfs_sb_version_hasdalign(&mp->m_sb)) {
		xfs_warn(mp,
"cannot change alignment: superblock does not support data alignment");
		return -EINVAL;
	}

	return 0;
}

/* Update alignment values based on mount options and sb values. */
STATIC int
xfs_update_alignment(
	struct xfs_mount	*mp)
{
	struct xfs_sb		*sbp = &mp->m_sb;

	if (mp->m_dalign) {
		bool		update_sb;
		int		error;

		if (sbp->sb_unit == mp->m_dalign &&
		    sbp->sb_width == mp->m_swidth)
			return 0;

		error = xfs_check_new_dalign(mp, mp->m_dalign, &update_sb);
		if (error || !update_sb)
			return error;

		sbp->sb_unit = mp->m_dalign;
		sbp->sb_width = mp->m_swidth;
		mp->m_update_sb = true;
	} else if (!(mp->m_flags & XFS_MOUNT_NOALIGN) &&
		   xfs_sb_version_hasdalign(&mp->m_sb)) {
		mp->m_dalign = sbp->sb_unit;
		mp->m_swidth = sbp->sb_width;
	}

	return 0;
}

/*
 * Precalculate the low space thresholds for dynamic speculative
 * preallocation: m_low_space[i] holds (i + 1) percent of the data device
 * size, in blocks.
 */
void
xfs_set_low_space_thresholds(
	struct xfs_mount	*mp)
{
	int i;

	for (i = 0; i < XFS_LOWSP_MAX; i++) {
		uint64_t space = mp->m_sb.sb_dblocks;

		do_div(space, 100);
		mp->m_low_space[i] = space * (i + 1);
	}
}

/*
 * Check that the data (and log if separate) is an ok size.
 */
STATIC int
xfs_check_sizes(
	struct xfs_mount *mp)
{
	struct xfs_buf	*bp;
	xfs_daddr_t	d;
	int		error;

	d = (xfs_daddr_t)XFS_FSB_TO_BB(mp, mp->m_sb.sb_dblocks);
	if (XFS_BB_TO_FSB(mp, d) != mp->m_sb.sb_dblocks) {
		xfs_warn(mp, "filesystem size mismatch detected");
		return -EFBIG;
	}
	error = xfs_buf_read_uncached(mp->m_ddev_targp,
					d - XFS_FSS_TO_BB(mp, 1),
					XFS_FSS_TO_BB(mp, 1), 0, &bp, NULL);
	if (error) {
		xfs_warn(mp, "last sector read failed");
		return error;
	}
	xfs_buf_relse(bp);

	if (mp->m_logdev_targp == mp->m_ddev_targp)
		return 0;

	d = (xfs_daddr_t)XFS_FSB_TO_BB(mp, mp->m_sb.sb_logblocks);
	if (XFS_BB_TO_FSB(mp, d) != mp->m_sb.sb_logblocks) {
		xfs_warn(mp, "log size mismatch detected");
		return -EFBIG;
	}
	error = xfs_buf_read_uncached(mp->m_logdev_targp,
					d - XFS_FSB_TO_BB(mp, 1),
					XFS_FSB_TO_BB(mp, 1), 0, &bp, NULL);
	if (error) {
		xfs_warn(mp, "log device read failed");
		return error;
	}
	xfs_buf_relse(bp);
	return 0;
}

/*
 * Clear the quotaflags in memory and in the superblock.
 */
int
xfs_mount_reset_sbqflags(
	struct xfs_mount	*mp)
{
	mp->m_qflags = 0;

	/* It is OK to look at sb_qflags in the mount path without m_sb_lock. */
	if (mp->m_sb.sb_qflags == 0)
		return 0;
	spin_lock(&mp->m_sb_lock);
	mp->m_sb.sb_qflags = 0;
	spin_unlock(&mp->m_sb_lock);

	if (!xfs_fs_writable(mp, SB_FREEZE_WRITE))
		return 0;

	return xfs_sync_sb(mp, false);
}

uint64_t
xfs_default_resblks(xfs_mount_t *mp)
{
	uint64_t resblks;

	/*
	 * We default to 5% or 8192 fsbs of space reserved, whichever is
	 * smaller. This is intended to cover concurrent allocation
	 * transactions when we initially hit enospc. These each require a 4
	 * block reservation. Hence by default we cover roughly 2000 concurrent
	 * allocation reservations.
	 */
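	/*
	 * For example (illustrative numbers only): a 1 TiB data device with
	 * 4096-byte blocks has 268435456 blocks, so 5% (~13.4M blocks)
	 * exceeds the cap and 8192 blocks are reserved; a 64 MiB device
	 * (16384 blocks) stays under it and reserves 16384 / 20 = 819 blocks.
	 */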
	resblks = mp->m_sb.sb_dblocks;
	do_div(resblks, 20);
	resblks = min_t(uint64_t, resblks, 8192);
	return resblks;
}

/* Ensure the summary counts are correct. */
STATIC int
xfs_check_summary_counts(
	struct xfs_mount	*mp)
{
	/*
	 * The AG0 superblock verifier rejects in-progress filesystems,
	 * so we should never see the flag set this far into mounting.
	 */
	if (mp->m_sb.sb_inprogress) {
		xfs_err(mp, "sb_inprogress set after log recovery??");
		WARN_ON(1);
		return -EFSCORRUPTED;
	}

	/*
	 * Now the log is mounted, we know if it was an unclean shutdown or
	 * not. If it was, then once the first phase of recovery has completed,
	 * we have consistent AG blocks on disk. We have not recovered EFIs
	 * yet, but they are recovered transactionally in the second recovery
	 * phase later.
	 *
	 * If the log was clean when we mounted, we can check the summary
	 * counters. If any of them are obviously incorrect, we can recompute
	 * them from the AGF headers in the next step.
	 */
	if (XFS_LAST_UNMOUNT_WAS_CLEAN(mp) &&
	    (mp->m_sb.sb_fdblocks > mp->m_sb.sb_dblocks ||
	     !xfs_verify_icount(mp, mp->m_sb.sb_icount) ||
	     mp->m_sb.sb_ifree > mp->m_sb.sb_icount))
		xfs_fs_mark_sick(mp, XFS_SICK_FS_COUNTERS);

	/*
	 * We can safely re-initialise incore superblock counters from the
	 * per-ag data. These may not be correct if the filesystem was not
	 * cleanly unmounted, so we waited for recovery to finish before doing
	 * this.
	 *
	 * If the filesystem was cleanly unmounted or the previous check did
	 * not flag anything weird, then we can trust the values in the
	 * superblock to be correct and we don't need to do anything here.
	 * Otherwise, recalculate the summary counters.
	 */
	if ((!xfs_sb_version_haslazysbcount(&mp->m_sb) ||
	     XFS_LAST_UNMOUNT_WAS_CLEAN(mp)) &&
	    !xfs_fs_has_sickness(mp, XFS_SICK_FS_COUNTERS))
		return 0;

	return xfs_initialize_perag_data(mp, mp->m_sb.sb_agcount);
}

/*
 * This function does the following on an initial mount of a file system:
 *	- reads the superblock from disk and init the mount struct
 *	- if we're a 32-bit kernel, do a size check on the superblock
 *	  so we don't mount terabyte filesystems
 *	- init mount struct realtime fields
 *	- allocate inode hash table for fs
 *	- init directory manager
 *	- perform recovery and init the log manager
 */
int
xfs_mountfs(
	struct xfs_mount	*mp)
{
	struct xfs_sb		*sbp = &mp->m_sb;
	struct xfs_inode	*rip;
	struct xfs_ino_geometry *igeo = M_IGEO(mp);
	uint64_t		resblks;
	uint			quotamount = 0;
	uint			quotaflags = 0;
	int			error = 0;

	xfs_sb_mount_common(mp, sbp);

	/*
	 * Check for mismatched features2 values. Older kernels read and wrote
	 * into the wrong sb offset for sb_features2 on some platforms due to
	 * xfs_sb_t not being 64-bit aligned in size when sb_features2 was
	 * added, which made older superblock reading/writing routines swap it
	 * as a 64-bit value.
	 *
	 * For backwards compatibility, we make both slots equal.
	 *
	 * If we detect a mismatched field, we OR the set bits into the existing
	 * features2 field in case it has already been modified; we don't want
	 * to lose any features. We then update the bad location with the ORed
	 * value so that older kernels will see any features2 flags. The
	 * superblock writeback code ensures the new sb_features2 is copied to
	 * sb_bad_features2 before it is logged or written to disk.
	 */
	if (xfs_sb_has_mismatched_features2(sbp)) {
		xfs_warn(mp, "correcting sb_features alignment problem");
		sbp->sb_features2 |= sbp->sb_bad_features2;
		mp->m_update_sb = true;

		/*
		 * Re-check for ATTR2 in case it was found in bad_features2
		 * slot.
		 */
		if (xfs_sb_version_hasattr2(&mp->m_sb) &&
		    !(mp->m_flags & XFS_MOUNT_NOATTR2))
			mp->m_flags |= XFS_MOUNT_ATTR2;
	}

	if (xfs_sb_version_hasattr2(&mp->m_sb) &&
	    (mp->m_flags & XFS_MOUNT_NOATTR2)) {
		xfs_sb_version_removeattr2(&mp->m_sb);
		mp->m_update_sb = true;

		/* update sb_versionnum for the clearing of the morebits */
		if (!sbp->sb_features2)
			mp->m_update_sb = true;
	}

	/* always use v2 inodes by default now */
	if (!(mp->m_sb.sb_versionnum & XFS_SB_VERSION_NLINKBIT)) {
		mp->m_sb.sb_versionnum |= XFS_SB_VERSION_NLINKBIT;
		mp->m_update_sb = true;
	}

	/*
	 * If we were given new sunit/swidth options, do some basic validation
	 * checks and convert the incore dalign and swidth values to the
	 * same units (FSB) that everything else uses. This /must/ happen
	 * before computing the inode geometry.
	 */
	error = xfs_validate_new_dalign(mp);
	if (error)
		goto out;

	xfs_alloc_compute_maxlevels(mp);
	xfs_bmap_compute_maxlevels(mp, XFS_DATA_FORK);
	xfs_bmap_compute_maxlevels(mp, XFS_ATTR_FORK);
	xfs_ialloc_setup_geometry(mp);
	xfs_rmapbt_compute_maxlevels(mp);
	xfs_refcountbt_compute_maxlevels(mp);

	/*
	 * Check if sb_agblocks is aligned at stripe boundary. If sb_agblocks
	 * is NOT aligned turn off m_dalign since allocator alignment is within
	 * an ag, therefore ag has to be aligned at stripe boundary. Note that
	 * we must compute the free space and rmap btree geometry before doing
	 * this.
	 */
	error = xfs_update_alignment(mp);
	if (error)
		goto out;

	/* enable fail_at_unmount as default */
	mp->m_fail_unmount = true;

	error = xfs_sysfs_init(&mp->m_kobj, &xfs_mp_ktype,
			       NULL, mp->m_super->s_id);
	if (error)
		goto out;

	error = xfs_sysfs_init(&mp->m_stats.xs_kobj, &xfs_stats_ktype,
			       &mp->m_kobj, "stats");
	if (error)
		goto out_remove_sysfs;

	error = xfs_error_sysfs_init(mp);
	if (error)
		goto out_del_stats;

	error = xfs_errortag_init(mp);
	if (error)
		goto out_remove_error_sysfs;

	error = xfs_uuid_mount(mp);
	if (error)
		goto out_remove_errortag;

	/*
	 * Update the preferred write size based on the information from the
	 * on-disk superblock.
	 */
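	/*
	 * For instance (illustrative values), allocsize=64k on a filesystem
	 * with 4096-byte blocks gives m_allocsize_log = 16 and sb_blocklog =
	 * 12, so m_allocsize_blocks = 1 << 4 = 16 blocks.
	 */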
	mp->m_allocsize_log =
		max_t(uint32_t, sbp->sb_blocklog, mp->m_allocsize_log);
	mp->m_allocsize_blocks = 1U << (mp->m_allocsize_log - sbp->sb_blocklog);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 764)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 765) /* set the low space thresholds for dynamic preallocation */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 766) xfs_set_low_space_thresholds(mp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 767)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 768) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 769) * If enabled, sparse inode chunk alignment is expected to match the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 770) * cluster size. Full inode chunk alignment must match the chunk size,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 771) * but that is checked on sb read verification...
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 772) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 773) if (xfs_sb_version_hassparseinodes(&mp->m_sb) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 774) mp->m_sb.sb_spino_align !=
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 775) XFS_B_TO_FSBT(mp, igeo->inode_cluster_size_raw)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 776) xfs_warn(mp,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 777) "Sparse inode block alignment (%u) must match cluster size (%llu).",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 778) mp->m_sb.sb_spino_align,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 779) XFS_B_TO_FSBT(mp, igeo->inode_cluster_size_raw));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 780) error = -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 781) goto out_remove_uuid;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 782) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 783)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 784) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 785) * Check that the data (and log if separate) is an ok size.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 786) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 787) error = xfs_check_sizes(mp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 788) if (error)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 789) goto out_remove_uuid;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 790)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 791) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 792) * Initialize realtime fields in the mount structure
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 793) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 794) error = xfs_rtmount_init(mp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 795) if (error) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 796) xfs_warn(mp, "RT mount failed");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 797) goto out_remove_uuid;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 798) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 799)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 800) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 801) * Copies the low order bits of the timestamp and the randomly
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 802) * set "sequence" number out of a UUID.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 803) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 804) mp->m_fixedfsid[0] =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 805) (get_unaligned_be16(&sbp->sb_uuid.b[8]) << 16) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 806) get_unaligned_be16(&sbp->sb_uuid.b[4]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 807) mp->m_fixedfsid[1] = get_unaligned_be32(&sbp->sb_uuid.b[0]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 808)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 809) error = xfs_da_mount(mp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 810) if (error) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 811) xfs_warn(mp, "Failed dir/attr init: %d", error);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 812) goto out_remove_uuid;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 813) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 814)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 815) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 816) * Initialize the precomputed transaction reservation values.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 817) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 818) xfs_trans_init(mp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 819)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 820) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 821) * Allocate and initialize the per-ag data.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 822) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 823) error = xfs_initialize_perag(mp, sbp->sb_agcount, &mp->m_maxagi);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 824) if (error) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 825) xfs_warn(mp, "Failed per-ag init: %d", error);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 826) goto out_free_dir;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 827) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 828)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 829) if (XFS_IS_CORRUPT(mp, !sbp->sb_logblocks)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 830) xfs_warn(mp, "no log defined");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 831) error = -EFSCORRUPTED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 832) goto out_free_perag;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 833) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 834)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 835) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 836) * Log's mount-time initialization. The first part of recovery can place
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 837) * some items on the AIL, to be handled when recovery is finished or
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 838) * cancelled.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 839) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 840) error = xfs_log_mount(mp, mp->m_logdev_targp,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 841) XFS_FSB_TO_DADDR(mp, sbp->sb_logstart),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 842) XFS_FSB_TO_BB(mp, sbp->sb_logblocks));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 843) if (error) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 844) xfs_warn(mp, "log mount failed");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 845) goto out_fail_wait;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 846) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 847)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 848) /* Make sure the summary counts are ok. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 849) error = xfs_check_summary_counts(mp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 850) if (error)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 851) goto out_log_dealloc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 852)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 853) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 854) * Get and sanity-check the root inode.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 855) * Save the pointer to it in the mount structure.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 856) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 857) error = xfs_iget(mp, NULL, sbp->sb_rootino, XFS_IGET_UNTRUSTED,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 858) XFS_ILOCK_EXCL, &rip);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 859) if (error) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 860) xfs_warn(mp,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 861) "Failed to read root inode 0x%llx, error %d",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 862) sbp->sb_rootino, -error);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 863) goto out_log_dealloc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 864) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 865)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 866) ASSERT(rip != NULL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 867)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 868) if (XFS_IS_CORRUPT(mp, !S_ISDIR(VFS_I(rip)->i_mode))) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 869) xfs_warn(mp, "corrupted root inode %llu: not a directory",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 870) (unsigned long long)rip->i_ino);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 871) xfs_iunlock(rip, XFS_ILOCK_EXCL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 872) error = -EFSCORRUPTED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 873) goto out_rele_rip;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 874) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 875) mp->m_rootip = rip; /* save it */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 876)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 877) xfs_iunlock(rip, XFS_ILOCK_EXCL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 878)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 879) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 880) * Initialize realtime inode pointers in the mount structure
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 881) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 882) error = xfs_rtmount_inodes(mp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 883) if (error) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 884) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 885) * The error path below frees up the root inode for us.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 886) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 887) xfs_warn(mp, "failed to read RT inodes");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 888) goto out_rele_rip;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 889) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 890)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 891) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 892) * If this is a read-only mount, defer the superblock updates until
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 893) * the next remount into writeable mode. Otherwise we would never
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 894) * perform the update, e.g. for the root filesystem.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 895) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 896) if (mp->m_update_sb && !(mp->m_flags & XFS_MOUNT_RDONLY)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 897) error = xfs_sync_sb(mp, false);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 898) if (error) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 899) xfs_warn(mp, "failed to write sb changes");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 900) goto out_rtunmount;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 901) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 902) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 903)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 904) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 905) * Initialise the XFS quota management subsystem for this mount
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 906) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 907) if (XFS_IS_QUOTA_RUNNING(mp)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 908) error = xfs_qm_newmount(mp, &quotamount, &quotaflags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 909) if (error)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 910) goto out_rtunmount;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 911) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 912) ASSERT(!XFS_IS_QUOTA_ON(mp));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 913)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 914) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 915) * If a file system had quotas running earlier, but decided to
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 916) * mount without -o uquota/pquota/gquota options, revoke the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 917) * quotachecked license.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 918) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 919) if (mp->m_sb.sb_qflags & XFS_ALL_QUOTA_ACCT) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 920) xfs_notice(mp, "resetting quota flags");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 921) error = xfs_mount_reset_sbqflags(mp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 922) if (error)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 923) goto out_rtunmount;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 924) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 925) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 926)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 927) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 928) * Finish recovering the file system. This part needed to be delayed
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 929) * until after the root and real-time bitmap inodes were consistently
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 930) * read in.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 931) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 932) error = xfs_log_mount_finish(mp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 933) if (error) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 934) xfs_warn(mp, "log mount finish failed");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 935) goto out_rtunmount;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 936) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 937)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 938) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 939) * Now that the log is fully replayed, we can transition to full read-only
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 940) * mode for read-only mounts. This will sync all the metadata and clean
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 941) * the log so that the recovery we just performed does not have to be
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 942) * replayed again on the next mount.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 943) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 944) * We use the same quiesce mechanism as the rw->ro remount, as they are
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 945) * semantically identical operations.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 946) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 947) if ((mp->m_flags & (XFS_MOUNT_RDONLY|XFS_MOUNT_NORECOVERY)) ==
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 948) XFS_MOUNT_RDONLY) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 949) xfs_quiesce_attr(mp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 950) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 951)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 952) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 953) * Complete the post-log-replay component of the quota initialisation.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 954) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 955) if (quotamount) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 956) ASSERT(mp->m_qflags == 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 957) mp->m_qflags = quotaflags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 958)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 959) xfs_qm_mount_quotas(mp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 960) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 961)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 962) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 963) * Now we are mounted, reserve a small amount of unused space for
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 964) * privileged transactions. This is needed so that transaction
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 965) * space required for critical operations can dip into this pool
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 966) * when at ENOSPC. This is needed for operations like create with
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 967) * attr, unwritten extent conversion at ENOSPC, etc. Data allocations
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 968) * are not allowed to use this reserved space.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 969) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 970) * This may drive us straight to ENOSPC on mount, but that implies
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 971) * we were already there on the last unmount. Warn if this occurs.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 972) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 973) if (!(mp->m_flags & XFS_MOUNT_RDONLY)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 974) resblks = xfs_default_resblks(mp);
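^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 974) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 974) * Note (added for clarity; an assumption about the helper): at the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 974) * time of writing, xfs_default_resblks() has returned
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 974) * min(sb_dblocks / 20, 8192), i.e. 5% of the data device capped at
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 974) * 8192 blocks.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 974) */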
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 975) error = xfs_reserve_blocks(mp, &resblks, NULL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 976) if (error)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 977) xfs_warn(mp,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 978) "Unable to allocate reserve blocks. Continuing without reserve pool.");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 979)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 980) /* Recover any CoW blocks that never got remapped. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 981) error = xfs_reflink_recover_cow(mp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 982) if (error) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 983) xfs_err(mp,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 984) "Error %d recovering leftover CoW allocations.", error);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 985) xfs_force_shutdown(mp, SHUTDOWN_CORRUPT_INCORE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 986) goto out_quota;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 987) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 988)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 989) /* Reserve AG blocks for future btree expansion. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 990) error = xfs_fs_reserve_ag_blocks(mp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 991) if (error && error != -ENOSPC)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 992) goto out_agresv;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 993) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 994)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 995) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 996)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 997) out_agresv:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 998) xfs_fs_unreserve_ag_blocks(mp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 999) out_quota:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1000) xfs_qm_unmount_quotas(mp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1001) out_rtunmount:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1002) xfs_rtunmount_inodes(mp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1003) out_rele_rip:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1004) xfs_irele(rip);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1005) /* Clean out dquots that might be in memory after quotacheck. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1006) xfs_qm_unmount(mp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1007) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1008) * Cancel all delayed reclaim work and reclaim the inodes directly.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1009) * We have to do this /after/ rtunmount and qm_unmount because those
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1010) * two will have scheduled delayed reclaim for the rt/quota inodes.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1011) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1012) * This is slightly different from the unmountfs call sequence
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1013) * because we could be tearing down a partially set up mount. In
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1014) * particular, if log_mount_finish fails we bail out without calling
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1015) * qm_unmount_quotas and therefore rely on qm_unmount to release the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1016) * quota inodes.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1017) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1018) cancel_delayed_work_sync(&mp->m_reclaim_work);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1019) xfs_reclaim_inodes(mp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1020) xfs_health_unmount(mp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1021) out_log_dealloc:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1022) mp->m_flags |= XFS_MOUNT_UNMOUNTING;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1023) xfs_log_mount_cancel(mp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1024) out_fail_wait:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1025) if (mp->m_logdev_targp && mp->m_logdev_targp != mp->m_ddev_targp)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1026) xfs_wait_buftarg(mp->m_logdev_targp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1027) xfs_wait_buftarg(mp->m_ddev_targp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1028) out_free_perag:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1029) xfs_free_perag(mp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1030) out_free_dir:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1031) xfs_da_unmount(mp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1032) out_remove_uuid:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1033) xfs_uuid_unmount(mp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1034) out_remove_errortag:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1035) xfs_errortag_del(mp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1036) out_remove_error_sysfs:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1037) xfs_error_sysfs_del(mp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1038) out_del_stats:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1039) xfs_sysfs_del(&mp->m_stats.xs_kobj);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1040) out_remove_sysfs:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1041) xfs_sysfs_del(&mp->m_kobj);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1042) out:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1043) return error;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1044) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1045)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1046) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1047) * This flushes out the inodes, dquots and the superblock, unmounts the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1048) * log and makes sure that incore structures are freed.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1049) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1050) void
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1051) xfs_unmountfs(
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1052) struct xfs_mount *mp)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1053) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1054) uint64_t resblks;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1055) int error;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1056)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1057) xfs_stop_block_reaping(mp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1058) xfs_fs_unreserve_ag_blocks(mp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1059) xfs_qm_unmount_quotas(mp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1060) xfs_rtunmount_inodes(mp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1061) xfs_irele(mp->m_rootip);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1062)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1063) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1064) * We can potentially deadlock here if we have an inode cluster
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1065) * that has been freed but still has its buffer pinned in memory
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1066) * because the transaction is still sitting in an iclog. The stale inodes
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1067) * on that buffer will be pinned to the buffer until the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1068) * transaction hits the disk and the callbacks run. Pushing the AIL will
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1069) * skip the stale inodes and may never see the pinned buffer, so
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1070) * nothing will push out the iclog and unpin the buffer. Hence we
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1071) * need to force the log here to ensure all items are flushed into the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1072) * AIL before we go any further.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1073) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1074) xfs_log_force(mp, XFS_LOG_SYNC);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1075)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1076) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1077) * Wait for all busy extents to be freed, including completion of
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1078) * any discard operation.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1079) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1080) xfs_extent_busy_wait_all(mp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1081) flush_workqueue(xfs_discard_wq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1082)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1083) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1084) * We now need to tell the world we are unmounting. This will allow
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1085) * us to detect that the filesystem is going away and we should error
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1086) * out anything that we have been retrying in the background. This will
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1087) * prevent never-ending retries in AIL pushing from hanging the unmount.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1088) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1089) mp->m_flags |= XFS_MOUNT_UNMOUNTING;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1090)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1091) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1092) * Flush all pending changes from the AIL.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1093) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1094) xfs_ail_push_all_sync(mp->m_ail);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1095)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1096) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1097) * Reclaim all inodes. At this point there should be no dirty inodes and
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1098) * none should be pinned or locked. Stop background inode reclaim here
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1099) * if it is still running.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1100) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1101) cancel_delayed_work_sync(&mp->m_reclaim_work);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1102) xfs_reclaim_inodes(mp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1103) xfs_health_unmount(mp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1104)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1105) xfs_qm_unmount(mp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1106)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1107) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1108) * Unreserve any blocks we have so that when we unmount we don't account
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1109) * the reserved free space as used. This is really only necessary for
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1110) * lazy superblock counting because it trusts the incore superblock
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1111) * counters to be absolutely correct on clean unmount.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1112) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1113) * We don't bother correcting this elsewhere for lazy superblock
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1114) * counting because on mount of an unclean filesystem we reconstruct the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1115) * correct counter value and this is irrelevant.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1116) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1117) * For non-lazy counter filesystems, this doesn't matter at all because
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1118) * we only ever apply deltas to the superblock and hence the incore
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1119) * value does not matter.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1120) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1121) resblks = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1122) error = xfs_reserve_blocks(mp, &resblks, NULL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1123) if (error)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1124) xfs_warn(mp,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1125) "Unable to free reserved block pool. Freespace may not be correct on next mount.");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1126)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1127) error = xfs_log_sbcount(mp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1128) if (error)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1129) xfs_warn(mp,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1130) "Unable to update superblock counters. Freespace may not be correct on next mount.");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1131)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1133) xfs_log_unmount(mp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1134) xfs_da_unmount(mp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1135) xfs_uuid_unmount(mp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1136)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1137) #if defined(DEBUG)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1138) xfs_errortag_clearall(mp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1139) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1140) xfs_free_perag(mp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1141)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1142) xfs_errortag_del(mp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1143) xfs_error_sysfs_del(mp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1144) xfs_sysfs_del(&mp->m_stats.xs_kobj);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1145) xfs_sysfs_del(&mp->m_kobj);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1146) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1147)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1148) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1149) * Determine whether modifications can proceed. The caller specifies the minimum
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1150) * freeze level for which modifications should not be allowed. This allows
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1151) * certain operations to proceed while the freeze sequence is in progress, if
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1152) * necessary.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1153) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1154) bool
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1155) xfs_fs_writable(
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1156) struct xfs_mount *mp,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1157) int level)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1158) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1159) ASSERT(level > SB_UNFROZEN);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1160) if ((mp->m_super->s_writers.frozen >= level) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1161) XFS_FORCED_SHUTDOWN(mp) || (mp->m_flags & XFS_MOUNT_RDONLY))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1162) return false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1163)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1164) return true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1165) }
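^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1165)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1165) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1165) * Example usage (hypothetical caller, shown for illustration only): a
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1165) * transaction path that must not run once writes are frozen could do
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1165) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1165) *	if (!xfs_fs_writable(mp, SB_FREEZE_WRITE))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1165) *		return -EROFS;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1165) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1165) * while code allowed to run until the freeze completes, such as the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1165) * superblock counter sync below, passes SB_FREEZE_COMPLETE instead.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1165) */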
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1166)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1167) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1168) * xfs_log_sbcount
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1169) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1170) * Sync the superblock counters to disk.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1171) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1172) * Note this code can be called during the process of freezing, so we use the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1173) * transaction allocator that does not block when the transaction subsystem is
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1174) * in its frozen state.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1175) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1176) int
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1177) xfs_log_sbcount(xfs_mount_t *mp)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1178) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1179) /* allow this to proceed during the freeze sequence... */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1180) if (!xfs_fs_writable(mp, SB_FREEZE_COMPLETE))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1181) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1182)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1183) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1184) * we don't need to do this if we are updating the superblock
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1185) * counters on every modification.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1186) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1187) if (!xfs_sb_version_haslazysbcount(&mp->m_sb))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1188) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1189)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1190) return xfs_sync_sb(mp, true);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1191) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1192)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1193) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1194) * Deltas for the block count can vary from 1 to very large, but lock contention
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1195) * only occurs on frequent small block count updates such as in the delayed
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1196) * allocation path for buffered writes (page at a time updates). Hence we set
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1197) * a large batch count (1024) to minimise global counter updates except when
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1198) * we get near to ENOSPC and we have to be very accurate with our updates.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1199) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1200) #define XFS_FDBLOCKS_BATCH 1024
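^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1200) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1200) * Note (added for clarity): with a per-cpu batch of b on an n-CPU
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1200) * system, the global count seen by percpu_counter_read() can lag the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1200) * true value by up to roughly n * b, which is why the batch is dropped
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1200) * to 1 below once the counter falls under 2 * XFS_FDBLOCKS_BATCH.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1200) */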
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1201) int
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1202) xfs_mod_fdblocks(
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1203) struct xfs_mount *mp,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1204) int64_t delta,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1205) bool rsvd)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1206) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1207) int64_t lcounter;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1208) long long res_used;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1209) s32 batch;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1210)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1211) if (delta > 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1212) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1213) * If the reserve pool is depleted, put blocks back into it
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1214) * first. Most of the time the pool is full.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1215) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1216) if (likely(mp->m_resblks == mp->m_resblks_avail)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1217) percpu_counter_add(&mp->m_fdblocks, delta);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1218) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1219) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1220)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1221) spin_lock(&mp->m_sb_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1222) res_used = (long long)(mp->m_resblks - mp->m_resblks_avail);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1223)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1224) if (res_used > delta) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1225) mp->m_resblks_avail += delta;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1226) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1227) delta -= res_used;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1228) mp->m_resblks_avail = mp->m_resblks;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1229) percpu_counter_add(&mp->m_fdblocks, delta);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1230) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1231) spin_unlock(&mp->m_sb_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1232) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1233) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1234)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1235) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1236) * Taking blocks away; we need to be more accurate the closer we
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1237) * are to zero.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1238) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1239) * If the counter has a value of less than 2 * max batch size,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1240) * then make everything serialise as we are really close to
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1241) * ENOSPC.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1242) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1243) if (__percpu_counter_compare(&mp->m_fdblocks, 2 * XFS_FDBLOCKS_BATCH,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1244) XFS_FDBLOCKS_BATCH) < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1245) batch = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1246) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1247) batch = XFS_FDBLOCKS_BATCH;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1248)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1249) percpu_counter_add_batch(&mp->m_fdblocks, delta, batch);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1250) if (__percpu_counter_compare(&mp->m_fdblocks, mp->m_alloc_set_aside,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1251) XFS_FDBLOCKS_BATCH) >= 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1252) /* we had space! */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1253) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1254) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1255)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1256) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1257) * lock up the sb for dipping into reserves before releasing the space
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1258) * that took us to ENOSPC.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1259) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1260) spin_lock(&mp->m_sb_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1261) percpu_counter_add(&mp->m_fdblocks, -delta);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1262) if (!rsvd)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1263) goto fdblocks_enospc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1264)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1265) lcounter = (long long)mp->m_resblks_avail + delta;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1266) if (lcounter >= 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1267) mp->m_resblks_avail = lcounter;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1268) spin_unlock(&mp->m_sb_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1269) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1270) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1271) xfs_warn_once(mp,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1272) "Reserve blocks depleted! Consider increasing reserve pool size.");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1273)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1274) fdblocks_enospc:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1275) spin_unlock(&mp->m_sb_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1276) return -ENOSPC;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1277) }
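^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1277)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1277) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1277) * Example usage (hypothetical, for illustration only): callers pass a
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1277) * positive delta to return blocks and a negative delta to take them,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1277) * with @rsvd selecting whether the reserve pool may be dipped into:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1277) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1277) *	error = xfs_mod_fdblocks(mp, -((int64_t)nblocks), rsvd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1277) *	if (error == -ENOSPC)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1277) *		goto out_undo;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1277) */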
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1278)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1279) int
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1280) xfs_mod_frextents(
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1281) struct xfs_mount *mp,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1282) int64_t delta)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1283) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1284) int64_t lcounter;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1285) int ret = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1286)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1287) spin_lock(&mp->m_sb_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1288) lcounter = mp->m_sb.sb_frextents + delta;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1289) if (lcounter < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1290) ret = -ENOSPC;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1291) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1292) mp->m_sb.sb_frextents = lcounter;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1293) spin_unlock(&mp->m_sb_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1294) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1295) }
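^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1295)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1295) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1295) * Note (added for clarity; an assumption about the design): unlike
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1295) * m_fdblocks above, the free realtime extent count is a plain
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1295) * superblock field updated under m_sb_lock; realtime allocations are
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1295) * assumed infrequent enough that a percpu counter is not worth it here.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1295) */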
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1296)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1297) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1298) * Used to free the superblock along various error paths.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1299) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1300) void
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1301) xfs_freesb(
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1302) struct xfs_mount *mp)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1303) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1304) struct xfs_buf *bp = mp->m_sb_bp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1305)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1306) xfs_buf_lock(bp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1307) mp->m_sb_bp = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1308) xfs_buf_relse(bp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1309) }
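^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1309)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1309) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1309) * Note (added for clarity): the buffer is locked before m_sb_bp is
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1309) * cleared so that xfs_buf_relse(), which expects a locked buffer, can
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1309) * unlock it and drop the final reference.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1309) */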
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1310)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1311) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1312) * If the underlying (data/log/rt) device is read-only, there are some
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1313) * operations that cannot proceed.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1314) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1315) int
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1316) xfs_dev_is_read_only(
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1317) struct xfs_mount *mp,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1318) char *message)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1319) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1320) if (xfs_readonly_buftarg(mp->m_ddev_targp) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1321) xfs_readonly_buftarg(mp->m_logdev_targp) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1322) (mp->m_rtdev_targp && xfs_readonly_buftarg(mp->m_rtdev_targp))) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1323) xfs_notice(mp, "%s required on read-only device.", message);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1324) xfs_notice(mp, "write access unavailable, cannot proceed.");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1325) return -EROFS;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1326) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1327) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1328) }
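^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1328)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1328) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1328) * Example usage (hypothetical, for illustration only): write-path setup
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1328) * code can fail early with a descriptive message:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1328) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1328) *	error = xfs_dev_is_read_only(mp, "grow");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1328) *	if (error)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1328) *		return error;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1328) */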
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1329)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1330) /* Force the summary counters to be recalculated at next mount. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1331) void
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1332) xfs_force_summary_recalc(
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1333) struct xfs_mount *mp)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1334) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1335) if (!xfs_sb_version_haslazysbcount(&mp->m_sb))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1336) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1337)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1338) xfs_fs_mark_sick(mp, XFS_SICK_FS_COUNTERS);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1339) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1340)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1341) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1342) * Update the in-core delayed block counter.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1343) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1344) * We prefer to update the counter without having to take a spinlock for every
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1345) * counter update (i.e. batching). Each change to the delayed allocation
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1346) * reservations can easily exceed the default percpu counter batch size,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1347) * so we use a larger batch factor here.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1348) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1349) * Note that we don't currently have any callers requiring fast summation
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1350) * (e.g. percpu_counter_read) so we can use a big batch value here.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1351) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1352) #define XFS_DELALLOC_BATCH (4096)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1353) void
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1354) xfs_mod_delalloc(
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1355) struct xfs_mount *mp,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1356) int64_t delta)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1357) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1358) percpu_counter_add_batch(&mp->m_delalloc_blks, delta,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1359) XFS_DELALLOC_BATCH);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1360) }
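^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1360)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1360) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1360) * Note (added for clarity; an assumption about intent): this counter is
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1360) * primarily a debugging aid; a non-zero sum at unmount would indicate
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1360) * leaked delayed allocation reservations.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1360) */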