// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (c) 2000-2002,2005 Silicon Graphics, Inc.
 * All Rights Reserved.
 */
#include "xfs.h"
#include "xfs_fs.h"
#include "xfs_format.h"
#include "xfs_log_format.h"
#include "xfs_shared.h"
#include "xfs_trans_resv.h"
#include "xfs_bit.h"
#include "xfs_sb.h"
#include "xfs_mount.h"
#include "xfs_defer.h"
#include "xfs_btree.h"
#include "xfs_rmap.h"
#include "xfs_alloc_btree.h"
#include "xfs_alloc.h"
#include "xfs_extent_busy.h"
#include "xfs_errortag.h"
#include "xfs_error.h"
#include "xfs_trace.h"
#include "xfs_trans.h"
#include "xfs_buf_item.h"
#include "xfs_log.h"
#include "xfs_ag_resv.h"
#include "xfs_bmap.h"

extern kmem_zone_t	*xfs_bmap_free_item_zone;

struct workqueue_struct *xfs_alloc_wq;

#define XFS_ABSDIFF(a,b)	(((a) <= (b)) ? ((b) - (a)) : ((a) - (b)))

#define	XFSA_FIXUP_BNO_OK	1
#define	XFSA_FIXUP_CNT_OK	2

STATIC int xfs_alloc_ag_vextent_exact(xfs_alloc_arg_t *);
STATIC int xfs_alloc_ag_vextent_near(xfs_alloc_arg_t *);
STATIC int xfs_alloc_ag_vextent_size(xfs_alloc_arg_t *);

/*
 * Size of the AGFL.  For CRC-enabled filesystems we steal a couple of slots
 * at the beginning of the block for a proper header with the location
 * information and CRC.
 */
unsigned int
xfs_agfl_size(
	struct xfs_mount	*mp)
{
	unsigned int		size = mp->m_sb.sb_sectsize;

	if (xfs_sb_version_hascrc(&mp->m_sb))
		size -= sizeof(struct xfs_agfl);

	return size / sizeof(xfs_agblock_t);
}
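
/*
 * Worked example (editorial sketch, not part of the original source): on a
 * v5 (CRC-enabled) filesystem with 512-byte sectors, the 36-byte struct
 * xfs_agfl header is carved out of the sector, so xfs_agfl_size() returns
 * (512 - 36) / sizeof(xfs_agblock_t) = 119 slots; a non-CRC filesystem keeps
 * the whole sector and gets 512 / 4 = 128 slots.
 */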

unsigned int
xfs_refc_block(
	struct xfs_mount	*mp)
{
	if (xfs_sb_version_hasrmapbt(&mp->m_sb))
		return XFS_RMAP_BLOCK(mp) + 1;
	if (xfs_sb_version_hasfinobt(&mp->m_sb))
		return XFS_FIBT_BLOCK(mp) + 1;
	return XFS_IBT_BLOCK(mp) + 1;
}

xfs_extlen_t
xfs_prealloc_blocks(
	struct xfs_mount	*mp)
{
	if (xfs_sb_version_hasreflink(&mp->m_sb))
		return xfs_refc_block(mp) + 1;
	if (xfs_sb_version_hasrmapbt(&mp->m_sb))
		return XFS_RMAP_BLOCK(mp) + 1;
	if (xfs_sb_version_hasfinobt(&mp->m_sb))
		return XFS_FIBT_BLOCK(mp) + 1;
	return XFS_IBT_BLOCK(mp) + 1;
}

/*
 * In order to avoid ENOSPC-related deadlock caused by out-of-order locking of
 * AGF buffer (PV 947395), we place constraints on the relationship among
 * actual allocations for data blocks, freelist blocks, and potential file data
 * bmap btree blocks. However, these restrictions may result in no actual space
 * allocated for a delayed extent, for example, a data block in a certain AG is
 * allocated but there is no additional block for the additional bmap btree
 * block due to a split of the bmap btree of the file. The result of this may
 * lead to an infinite loop when the file gets flushed to disk and all delayed
 * extents need to be actually allocated. To get around this, we explicitly set
 * aside a few blocks which will not be reserved in delayed allocation.
 *
 * We need to reserve 4 fsbs _per AG_ for the freelist and 4 more to handle a
 * potential split of the file's bmap btree.
 */
unsigned int
xfs_alloc_set_aside(
	struct xfs_mount	*mp)
{
	return mp->m_sb.sb_agcount * (XFS_ALLOC_AGFL_RESERVE + 4);
}
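
/*
 * Worked example (editorial sketch, not part of the original source): with
 * XFS_ALLOC_AGFL_RESERVE == 4, a filesystem with sb_agcount == 16 sets aside
 * 16 * (4 + 4) = 128 blocks that delayed allocation can never reserve,
 * leaving room for freelist refills and bmap btree splits at writeback time.
 */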

/*
 * When deciding how much space to allocate out of an AG, we limit the
 * allocation maximum size to the size of the AG. However, we cannot use all
 * the blocks in the AG - some are permanently used by metadata. These
 * blocks are generally:
 *	- the AG superblock, AGF, AGI and AGFL
 *	- the AGF (bno and cnt) and AGI btree root blocks, and optionally
 *	  the AGI free inode and rmap btree root blocks.
 *	- blocks on the AGFL according to xfs_alloc_set_aside() limits
 *	- the rmapbt root block
 *
 * The AG headers are sector sized, so the amount of space they take up is
 * dependent on filesystem geometry. The others are all single blocks.
 */
unsigned int
xfs_alloc_ag_max_usable(
	struct xfs_mount	*mp)
{
	unsigned int		blocks;

	blocks = XFS_BB_TO_FSB(mp, XFS_FSS_TO_BB(mp, 4)); /* ag headers */
	blocks += XFS_ALLOC_AGFL_RESERVE;
	blocks += 3;			/* AGF, AGI btree root blocks */
	if (xfs_sb_version_hasfinobt(&mp->m_sb))
		blocks++;		/* finobt root block */
	if (xfs_sb_version_hasrmapbt(&mp->m_sb))
		blocks++;		/* rmap root block */
	if (xfs_sb_version_hasreflink(&mp->m_sb))
		blocks++;		/* refcount root block */

	return mp->m_sb.sb_agblocks - blocks;
}
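
/*
 * Worked example (editorial sketch, not part of the original source): on a
 * 4k-block, 512-byte-sector filesystem with finobt, rmapbt and reflink all
 * enabled, the four sector-sized headers round up to one block, so the
 * deduction is 1 + XFS_ALLOC_AGFL_RESERVE (4) + 3 fixed btree roots + 3
 * optional roots = 11 blocks, and the function returns sb_agblocks - 11.
 */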

/*
 * Lookup the record equal to [bno, len] in the btree given by cur.
 */
STATIC int				/* error */
xfs_alloc_lookup_eq(
	struct xfs_btree_cur	*cur,	/* btree cursor */
	xfs_agblock_t		bno,	/* starting block of extent */
	xfs_extlen_t		len,	/* length of extent */
	int			*stat)	/* success/failure */
{
	int			error;

	cur->bc_rec.a.ar_startblock = bno;
	cur->bc_rec.a.ar_blockcount = len;
	error = xfs_btree_lookup(cur, XFS_LOOKUP_EQ, stat);
	cur->bc_ag.abt.active = (*stat == 1);
	return error;
}

/*
 * Lookup the first record greater than or equal to [bno, len]
 * in the btree given by cur.
 */
int					/* error */
xfs_alloc_lookup_ge(
	struct xfs_btree_cur	*cur,	/* btree cursor */
	xfs_agblock_t		bno,	/* starting block of extent */
	xfs_extlen_t		len,	/* length of extent */
	int			*stat)	/* success/failure */
{
	int			error;

	cur->bc_rec.a.ar_startblock = bno;
	cur->bc_rec.a.ar_blockcount = len;
	error = xfs_btree_lookup(cur, XFS_LOOKUP_GE, stat);
	cur->bc_ag.abt.active = (*stat == 1);
	return error;
}

/*
 * Lookup the first record less than or equal to [bno, len]
 * in the btree given by cur.
 */
int					/* error */
xfs_alloc_lookup_le(
	struct xfs_btree_cur	*cur,	/* btree cursor */
	xfs_agblock_t		bno,	/* starting block of extent */
	xfs_extlen_t		len,	/* length of extent */
	int			*stat)	/* success/failure */
{
	int			error;
	cur->bc_rec.a.ar_startblock = bno;
	cur->bc_rec.a.ar_blockcount = len;
	error = xfs_btree_lookup(cur, XFS_LOOKUP_LE, stat);
	cur->bc_ag.abt.active = (*stat == 1);
	return error;
}

static inline bool
xfs_alloc_cur_active(
	struct xfs_btree_cur	*cur)
{
	return cur && cur->bc_ag.abt.active;
}

/*
 * Update the record referred to by cur to the value given
 * by [bno, len].
 * This either works (return 0) or gets an EFSCORRUPTED error.
 */
STATIC int				/* error */
xfs_alloc_update(
	struct xfs_btree_cur	*cur,	/* btree cursor */
	xfs_agblock_t		bno,	/* starting block of extent */
	xfs_extlen_t		len)	/* length of extent */
{
	union xfs_btree_rec	rec;

	rec.alloc.ar_startblock = cpu_to_be32(bno);
	rec.alloc.ar_blockcount = cpu_to_be32(len);
	return xfs_btree_update(cur, &rec);
}

/*
 * Get the data from the pointed-to record.
 */
int					/* error */
xfs_alloc_get_rec(
	struct xfs_btree_cur	*cur,	/* btree cursor */
	xfs_agblock_t		*bno,	/* output: starting block of extent */
	xfs_extlen_t		*len,	/* output: length of extent */
	int			*stat)	/* output: success/failure */
{
	struct xfs_mount	*mp = cur->bc_mp;
	xfs_agnumber_t		agno = cur->bc_ag.agno;
	union xfs_btree_rec	*rec;
	int			error;

	error = xfs_btree_get_rec(cur, &rec, stat);
	if (error || !(*stat))
		return error;

	*bno = be32_to_cpu(rec->alloc.ar_startblock);
	*len = be32_to_cpu(rec->alloc.ar_blockcount);

	if (*len == 0)
		goto out_bad_rec;

	/* check for valid extent range, including overflow */
	if (!xfs_verify_agbno(mp, agno, *bno))
		goto out_bad_rec;
	if (*bno > *bno + *len)
		goto out_bad_rec;
	if (!xfs_verify_agbno(mp, agno, *bno + *len - 1))
		goto out_bad_rec;

	return 0;

out_bad_rec:
	xfs_warn(mp,
		"%s Freespace BTree record corruption in AG %d detected!",
		cur->bc_btnum == XFS_BTNUM_BNO ? "Block" : "Size", agno);
	xfs_warn(mp,
		"start block 0x%x block count 0x%x", *bno, *len);
	return -EFSCORRUPTED;
}
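
/*
 * Usage sketch (editorial illustration, not part of the original source):
 * the lookup helpers only position the cursor; the record itself comes from
 * xfs_alloc_get_rec(), and both steps must be checked. For a cntbt cursor:
 *
 *	error = xfs_alloc_lookup_ge(cur, 0, maxlen, &i);
 *	if (!error && i) {
 *		error = xfs_alloc_get_rec(cur, &fbno, &flen, &i);
 *		if (!error && i)
 *			;	// [fbno, flen] is the smallest extent >= maxlen
 *	}
 */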

/*
 * Compute aligned version of the found extent.
 * Takes alignment and min length into account.
 */
STATIC bool
xfs_alloc_compute_aligned(
	xfs_alloc_arg_t	*args,		/* allocation argument structure */
	xfs_agblock_t	foundbno,	/* starting block in found extent */
	xfs_extlen_t	foundlen,	/* length in found extent */
	xfs_agblock_t	*resbno,	/* result block number */
	xfs_extlen_t	*reslen,	/* result length */
	unsigned	*busy_gen)
{
	xfs_agblock_t	bno = foundbno;
	xfs_extlen_t	len = foundlen;
	xfs_extlen_t	diff;
	bool		busy;

	/* Trim busy sections out of found extent */
	busy = xfs_extent_busy_trim(args, &bno, &len, busy_gen);

	/*
	 * If we have a largish extent that happens to start before min_agbno,
	 * see if we can shift it into range...
	 */
	if (bno < args->min_agbno && bno + len > args->min_agbno) {
		diff = args->min_agbno - bno;
		if (len > diff) {
			bno += diff;
			len -= diff;
		}
	}

	if (args->alignment > 1 && len >= args->minlen) {
		xfs_agblock_t	aligned_bno = roundup(bno, args->alignment);

		diff = aligned_bno - bno;

		*resbno = aligned_bno;
		*reslen = diff >= len ? 0 : len - diff;
	} else {
		*resbno = bno;
		*reslen = len;
	}

	return busy;
}
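
/*
 * Worked example (editorial sketch, not part of the original source): with
 * args->alignment == 8 and a trimmed free extent of [bno = 21, len = 40],
 * roundup(21, 8) = 24 and diff = 3, so the aligned candidate becomes
 * [resbno = 24, reslen = 37]; had diff consumed the whole extent, *reslen
 * would be clamped to 0 and the caller would skip this extent.
 */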

/*
 * Compute best start block and diff for "near" allocations.
 * freelen >= wantlen already checked by caller.
 */
STATIC xfs_extlen_t			/* difference value (absolute) */
xfs_alloc_compute_diff(
	xfs_agblock_t	wantbno,	/* target starting block */
	xfs_extlen_t	wantlen,	/* target length */
	xfs_extlen_t	alignment,	/* target alignment */
	int		datatype,	/* are we allocating data? */
	xfs_agblock_t	freebno,	/* freespace's starting block */
	xfs_extlen_t	freelen,	/* freespace's length */
	xfs_agblock_t	*newbnop)	/* result: best start block from free */
{
	xfs_agblock_t	freeend;	/* end of freespace extent */
	xfs_agblock_t	newbno1;	/* return block number */
	xfs_agblock_t	newbno2;	/* other new block number */
	xfs_extlen_t	newlen1 = 0;	/* length with newbno1 */
	xfs_extlen_t	newlen2 = 0;	/* length with newbno2 */
	xfs_agblock_t	wantend;	/* end of target extent */
	bool		userdata = datatype & XFS_ALLOC_USERDATA;

	ASSERT(freelen >= wantlen);
	freeend = freebno + freelen;
	wantend = wantbno + wantlen;
	/*
	 * We want to allocate from the start of a free extent if it is past
	 * the desired block or if we are allocating user data and the free
	 * extent is before the desired block. The second case is there to
	 * allow for contiguous allocation from the remaining free space if
	 * the file grows in the short term.
	 */
	if (freebno >= wantbno || (userdata && freeend < wantend)) {
		if ((newbno1 = roundup(freebno, alignment)) >= freeend)
			newbno1 = NULLAGBLOCK;
	} else if (freeend >= wantend && alignment > 1) {
		newbno1 = roundup(wantbno, alignment);
		newbno2 = newbno1 - alignment;
		if (newbno1 >= freeend)
			newbno1 = NULLAGBLOCK;
		else
			newlen1 = XFS_EXTLEN_MIN(wantlen, freeend - newbno1);
		if (newbno2 < freebno)
			newbno2 = NULLAGBLOCK;
		else
			newlen2 = XFS_EXTLEN_MIN(wantlen, freeend - newbno2);
		if (newbno1 != NULLAGBLOCK && newbno2 != NULLAGBLOCK) {
			if (newlen1 < newlen2 ||
			    (newlen1 == newlen2 &&
			     XFS_ABSDIFF(newbno1, wantbno) >
			     XFS_ABSDIFF(newbno2, wantbno)))
				newbno1 = newbno2;
		} else if (newbno2 != NULLAGBLOCK)
			newbno1 = newbno2;
	} else if (freeend >= wantend) {
		newbno1 = wantbno;
	} else if (alignment > 1) {
		newbno1 = roundup(freeend - wantlen, alignment);
		if (newbno1 > freeend - wantlen &&
		    newbno1 - alignment >= freebno)
			newbno1 -= alignment;
		else if (newbno1 >= freeend)
			newbno1 = NULLAGBLOCK;
	} else
		newbno1 = freeend - wantlen;
	*newbnop = newbno1;
	return newbno1 == NULLAGBLOCK ? 0 : XFS_ABSDIFF(newbno1, wantbno);
}
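
/*
 * Worked example (editorial sketch, not part of the original source): for a
 * non-userdata request with wantbno = 100, wantlen = 10, alignment = 1 and
 * free space [freebno = 80, freelen = 50] (freeend = 130 >= wantend = 110),
 * the third branch returns *newbnop = wantbno = 100 and a diff of 0; if the
 * free extent instead ended at 105, the final branch would pick
 * newbno1 = freeend - wantlen = 95, giving a diff of 5.
 */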

/*
 * Fix up the length, based on mod and prod.
 * len should be k * prod + mod for some k.
 * If len is too small it is returned unchanged.
 * If len hits maxlen it is left alone.
 */
STATIC void
xfs_alloc_fix_len(
	xfs_alloc_arg_t	*args)		/* allocation argument structure */
{
	xfs_extlen_t	k;
	xfs_extlen_t	rlen;

	ASSERT(args->mod < args->prod);
	rlen = args->len;
	ASSERT(rlen >= args->minlen);
	ASSERT(rlen <= args->maxlen);
	if (args->prod <= 1 || rlen < args->mod || rlen == args->maxlen ||
	    (args->mod == 0 && rlen < args->prod))
		return;
	k = rlen % args->prod;
	if (k == args->mod)
		return;
	if (k > args->mod)
		rlen = rlen - (k - args->mod);
	else
		rlen = rlen - args->prod + (args->mod - k);
	/* casts to (int) catch length underflows */
	if ((int)rlen < (int)args->minlen)
		return;
	ASSERT(rlen >= args->minlen && rlen <= args->maxlen);
	ASSERT(rlen % args->prod == args->mod);
	ASSERT(args->pag->pagf_freeblks + args->pag->pagf_flcount >=
		rlen + args->minleft);
	args->len = rlen;
}
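
/*
 * Worked example (editorial sketch, not part of the original source): with
 * args->prod = 4, args->mod = 1 and a candidate args->len of 10, k = 10 % 4
 * = 2 > mod, so the length is trimmed by (k - mod) = 1 to rlen = 9, which
 * satisfies 9 % 4 == 1; if trimming would drop rlen below args->minlen, the
 * length is left unchanged instead.
 */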

/*
 * Update the two btrees, logically removing from freespace the extent
 * starting at rbno, rlen blocks. The extent is contained within the
 * actual (current) free extent fbno for flen blocks.
 * Flags are passed in indicating whether the cursors are set to the
 * relevant records.
 */
STATIC int				/* error code */
xfs_alloc_fixup_trees(
	xfs_btree_cur_t	*cnt_cur,	/* cursor for by-size btree */
	xfs_btree_cur_t	*bno_cur,	/* cursor for by-block btree */
	xfs_agblock_t	fbno,		/* starting block of free extent */
	xfs_extlen_t	flen,		/* length of free extent */
	xfs_agblock_t	rbno,		/* starting block of returned extent */
	xfs_extlen_t	rlen,		/* length of returned extent */
	int		flags)		/* flags, XFSA_FIXUP_... */
{
	int		error;		/* error code */
	int		i;		/* operation results */
	xfs_agblock_t	nfbno1;		/* first new free startblock */
	xfs_agblock_t	nfbno2;		/* second new free startblock */
	xfs_extlen_t	nflen1 = 0;	/* first new free length */
	xfs_extlen_t	nflen2 = 0;	/* second new free length */
	struct xfs_mount *mp;

	mp = cnt_cur->bc_mp;

	/*
	 * Look up the record in the by-size tree if necessary.
	 */
	if (flags & XFSA_FIXUP_CNT_OK) {
#ifdef DEBUG
		if ((error = xfs_alloc_get_rec(cnt_cur, &nfbno1, &nflen1, &i)))
			return error;
		if (XFS_IS_CORRUPT(mp,
				   i != 1 ||
				   nfbno1 != fbno ||
				   nflen1 != flen))
			return -EFSCORRUPTED;
#endif
	} else {
		if ((error = xfs_alloc_lookup_eq(cnt_cur, fbno, flen, &i)))
			return error;
		if (XFS_IS_CORRUPT(mp, i != 1))
			return -EFSCORRUPTED;
	}
	/*
	 * Look up the record in the by-block tree if necessary.
	 */
	if (flags & XFSA_FIXUP_BNO_OK) {
#ifdef DEBUG
		if ((error = xfs_alloc_get_rec(bno_cur, &nfbno1, &nflen1, &i)))
			return error;
		if (XFS_IS_CORRUPT(mp,
				   i != 1 ||
				   nfbno1 != fbno ||
				   nflen1 != flen))
			return -EFSCORRUPTED;
#endif
	} else {
		if ((error = xfs_alloc_lookup_eq(bno_cur, fbno, flen, &i)))
			return error;
		if (XFS_IS_CORRUPT(mp, i != 1))
			return -EFSCORRUPTED;
	}

#ifdef DEBUG
	if (bno_cur->bc_nlevels == 1 && cnt_cur->bc_nlevels == 1) {
		struct xfs_btree_block	*bnoblock;
		struct xfs_btree_block	*cntblock;

		bnoblock = XFS_BUF_TO_BLOCK(bno_cur->bc_bufs[0]);
		cntblock = XFS_BUF_TO_BLOCK(cnt_cur->bc_bufs[0]);

		if (XFS_IS_CORRUPT(mp,
				   bnoblock->bb_numrecs !=
				   cntblock->bb_numrecs))
			return -EFSCORRUPTED;
	}
#endif

	/*
	 * Deal with all four cases: the allocated record is contained
	 * within the freespace record, so we can have new freespace
	 * at either (or both) end, or no freespace remaining.
	 */
	if (rbno == fbno && rlen == flen)
		nfbno1 = nfbno2 = NULLAGBLOCK;
	else if (rbno == fbno) {
		nfbno1 = rbno + rlen;
		nflen1 = flen - rlen;
		nfbno2 = NULLAGBLOCK;
	} else if (rbno + rlen == fbno + flen) {
		nfbno1 = fbno;
		nflen1 = flen - rlen;
		nfbno2 = NULLAGBLOCK;
	} else {
		nfbno1 = fbno;
		nflen1 = rbno - fbno;
		nfbno2 = rbno + rlen;
		nflen2 = (fbno + flen) - nfbno2;
	}
	/*
	 * Delete the entry from the by-size btree.
	 */
	if ((error = xfs_btree_delete(cnt_cur, &i)))
		return error;
	if (XFS_IS_CORRUPT(mp, i != 1))
		return -EFSCORRUPTED;
	/*
	 * Add new by-size btree entry(s).
	 */
	if (nfbno1 != NULLAGBLOCK) {
		if ((error = xfs_alloc_lookup_eq(cnt_cur, nfbno1, nflen1, &i)))
			return error;
		if (XFS_IS_CORRUPT(mp, i != 0))
			return -EFSCORRUPTED;
		if ((error = xfs_btree_insert(cnt_cur, &i)))
			return error;
		if (XFS_IS_CORRUPT(mp, i != 1))
			return -EFSCORRUPTED;
	}
	if (nfbno2 != NULLAGBLOCK) {
		if ((error = xfs_alloc_lookup_eq(cnt_cur, nfbno2, nflen2, &i)))
			return error;
		if (XFS_IS_CORRUPT(mp, i != 0))
			return -EFSCORRUPTED;
		if ((error = xfs_btree_insert(cnt_cur, &i)))
			return error;
		if (XFS_IS_CORRUPT(mp, i != 1))
			return -EFSCORRUPTED;
	}
	/*
	 * Fix up the by-block btree entry(s).
	 */
	if (nfbno1 == NULLAGBLOCK) {
		/*
		 * No remaining freespace, just delete the by-block tree entry.
		 */
		if ((error = xfs_btree_delete(bno_cur, &i)))
			return error;
		if (XFS_IS_CORRUPT(mp, i != 1))
			return -EFSCORRUPTED;
	} else {
		/*
		 * Update the by-block entry to start later|be shorter.
		 */
		if ((error = xfs_alloc_update(bno_cur, nfbno1, nflen1)))
			return error;
	}
	if (nfbno2 != NULLAGBLOCK) {
		/*
		 * 2 resulting free entries, need to add one.
		 */
		if ((error = xfs_alloc_lookup_eq(bno_cur, nfbno2, nflen2, &i)))
			return error;
		if (XFS_IS_CORRUPT(mp, i != 0))
			return -EFSCORRUPTED;
		if ((error = xfs_btree_insert(bno_cur, &i)))
			return error;
		if (XFS_IS_CORRUPT(mp, i != 1))
			return -EFSCORRUPTED;
	}
	return 0;
}
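
/*
 * Worked example (editorial sketch, not part of the original source):
 * allocating [rbno = 110, rlen = 20] out of the free extent [fbno = 100,
 * flen = 50] hits the "middle" case above, leaving two residual free extents
 * [nfbno1 = 100, nflen1 = 10] and [nfbno2 = 130, nflen2 = 20]: the old
 * by-size record is deleted and two new ones inserted, while the by-block
 * record is shrunk in place and a second record inserted for nfbno2.
 */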

static xfs_failaddr_t
xfs_agfl_verify(
	struct xfs_buf	*bp)
{
	struct xfs_mount *mp = bp->b_mount;
	struct xfs_agfl	*agfl = XFS_BUF_TO_AGFL(bp);
	__be32		*agfl_bno = xfs_buf_to_agfl_bno(bp);
	int		i;

	/*
	 * There is no verification of non-crc AGFLs because mkfs does not
	 * initialise the AGFL to zero or NULL. Hence the only valid part of the
	 * AGFL is what the AGF says is active. We can't get to the AGF, so we
	 * can't verify just those entries are valid.
	 */
	if (!xfs_sb_version_hascrc(&mp->m_sb))
		return NULL;

	if (!xfs_verify_magic(bp, agfl->agfl_magicnum))
		return __this_address;
	if (!uuid_equal(&agfl->agfl_uuid, &mp->m_sb.sb_meta_uuid))
		return __this_address;
	/*
	 * during growfs operations, the perag is not fully initialised,
	 * so we can't use it for any useful checking. growfs ensures we can't
	 * use it by using uncached buffers that don't have the perag attached
	 * so we can detect and avoid this problem.
	 */
	if (bp->b_pag && be32_to_cpu(agfl->agfl_seqno) != bp->b_pag->pag_agno)
		return __this_address;

	for (i = 0; i < xfs_agfl_size(mp); i++) {
		if (be32_to_cpu(agfl_bno[i]) != NULLAGBLOCK &&
		    be32_to_cpu(agfl_bno[i]) >= mp->m_sb.sb_agblocks)
			return __this_address;
	}

	if (!xfs_log_check_lsn(mp, be64_to_cpu(XFS_BUF_TO_AGFL(bp)->agfl_lsn)))
		return __this_address;
	return NULL;
}

static void
xfs_agfl_read_verify(
	struct xfs_buf	*bp)
{
	struct xfs_mount *mp = bp->b_mount;
	xfs_failaddr_t	fa;

	/*
	 * There is no verification of non-crc AGFLs because mkfs does not
	 * initialise the AGFL to zero or NULL. Hence the only valid part of the
	 * AGFL is what the AGF says is active. We can't get to the AGF, so we
	 * can't verify just those entries are valid.
	 */
	if (!xfs_sb_version_hascrc(&mp->m_sb))
		return;

	if (!xfs_buf_verify_cksum(bp, XFS_AGFL_CRC_OFF))
		xfs_verifier_error(bp, -EFSBADCRC, __this_address);
	else {
		fa = xfs_agfl_verify(bp);
		if (fa)
			xfs_verifier_error(bp, -EFSCORRUPTED, fa);
	}
}

static void
xfs_agfl_write_verify(
	struct xfs_buf	*bp)
{
	struct xfs_mount	*mp = bp->b_mount;
	struct xfs_buf_log_item	*bip = bp->b_log_item;
	xfs_failaddr_t		fa;

	/* no verification of non-crc AGFLs */
	if (!xfs_sb_version_hascrc(&mp->m_sb))
		return;

	fa = xfs_agfl_verify(bp);
	if (fa) {
		xfs_verifier_error(bp, -EFSCORRUPTED, fa);
		return;
	}

	if (bip)
		XFS_BUF_TO_AGFL(bp)->agfl_lsn = cpu_to_be64(bip->bli_item.li_lsn);

	xfs_buf_update_cksum(bp, XFS_AGFL_CRC_OFF);
}

const struct xfs_buf_ops xfs_agfl_buf_ops = {
	.name = "xfs_agfl",
	.magic = { cpu_to_be32(XFS_AGFL_MAGIC), cpu_to_be32(XFS_AGFL_MAGIC) },
	.verify_read = xfs_agfl_read_verify,
	.verify_write = xfs_agfl_write_verify,
	.verify_struct = xfs_agfl_verify,
};

/*
 * Read in the allocation group free block array.
 */
int					/* error */
xfs_alloc_read_agfl(
	xfs_mount_t	*mp,		/* mount point structure */
	xfs_trans_t	*tp,		/* transaction pointer */
	xfs_agnumber_t	agno,		/* allocation group number */
	xfs_buf_t	**bpp)		/* buffer for the ag free block array */
{
	xfs_buf_t	*bp;		/* return value */
	int		error;

	ASSERT(agno != NULLAGNUMBER);
	error = xfs_trans_read_buf(
			mp, tp, mp->m_ddev_targp,
			XFS_AG_DADDR(mp, agno, XFS_AGFL_DADDR(mp)),
			XFS_FSS_TO_BB(mp, 1), 0, &bp, &xfs_agfl_buf_ops);
	if (error)
		return error;
	xfs_buf_set_ref(bp, XFS_AGFL_REF);
	*bpp = bp;
	return 0;
}
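
/*
 * Usage sketch (editorial illustration, not part of the original source):
 * callers read the AGFL buffer and then index the free block array through
 * xfs_buf_to_agfl_bno(), which skips the v5 header when present:
 *
 *	error = xfs_alloc_read_agfl(mp, tp, agno, &agflbp);
 *	if (!error) {
 *		__be32 *agfl_bno = xfs_buf_to_agfl_bno(agflbp);
 *		// agf points at the corresponding AGF header
 *		bno = be32_to_cpu(agfl_bno[be32_to_cpu(agf->agf_flfirst)]);
 *	}
 */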

STATIC int
xfs_alloc_update_counters(
	struct xfs_trans	*tp,
	struct xfs_buf		*agbp,
	long			len)
{
	struct xfs_agf		*agf = agbp->b_addr;

	agbp->b_pag->pagf_freeblks += len;
	be32_add_cpu(&agf->agf_freeblks, len);

	xfs_trans_agblocks_delta(tp, len);
	if (unlikely(be32_to_cpu(agf->agf_freeblks) >
		     be32_to_cpu(agf->agf_length))) {
		xfs_buf_mark_corrupt(agbp);
		return -EFSCORRUPTED;
	}

	xfs_alloc_log_agf(tp, agbp, XFS_AGF_FREEBLKS);
	return 0;
}
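
/*
 * Editorial note (not part of the original source): len is a signed delta
 * applied to the in-core and on-disk free block counts, so allocation paths
 * pass a negative value (e.g. -args->len) and free paths a positive one; the
 * check above catches a free that would push agf_freeblks past agf_length.
 */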
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 731)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 732) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 733) * Block allocation algorithm and data structures.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 734) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 735) struct xfs_alloc_cur {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 736) struct xfs_btree_cur *cnt; /* btree cursors */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 737) struct xfs_btree_cur *bnolt;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 738) struct xfs_btree_cur *bnogt;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 739) xfs_extlen_t cur_len;/* current search length */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 740) xfs_agblock_t rec_bno;/* extent startblock */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 741) xfs_extlen_t rec_len;/* extent length */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 742) xfs_agblock_t bno; /* alloc bno */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 743) xfs_extlen_t len; /* alloc len */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 744) xfs_extlen_t diff; /* diff from search bno */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 745) unsigned int busy_gen;/* busy state */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 746) bool busy;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 747) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 748)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 749) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 750) * Set up cursors, etc. in the extent allocation cursor. This function can be
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 751) * called multiple times to reset an initialized structure without having to
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 752) * reallocate cursors.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 753) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 754) static int
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 755) xfs_alloc_cur_setup(
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 756) struct xfs_alloc_arg *args,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 757) struct xfs_alloc_cur *acur)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 758) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 759) int error;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 760) int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 761)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 762) ASSERT(args->alignment == 1 || args->type != XFS_ALLOCTYPE_THIS_BNO);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 763)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 764) acur->cur_len = args->maxlen;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 765) acur->rec_bno = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 766) acur->rec_len = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 767) acur->bno = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 768) acur->len = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 769) acur->diff = -1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 770) acur->busy = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 771) acur->busy_gen = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 772)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 773) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 774) * Perform an initial cntbt lookup to check for availability of maxlen
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 775) * extents. If this fails, we'll return -ENOSPC to signal the caller to
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 776) * attempt a small allocation.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 777) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 778) if (!acur->cnt)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 779) acur->cnt = xfs_allocbt_init_cursor(args->mp, args->tp,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 780) args->agbp, args->agno, XFS_BTNUM_CNT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 781) error = xfs_alloc_lookup_ge(acur->cnt, 0, args->maxlen, &i);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 782) if (error)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 783) return error;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 784)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 785) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 786) * Allocate the bnobt left and right search cursors.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 787) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 788) if (!acur->bnolt)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 789) acur->bnolt = xfs_allocbt_init_cursor(args->mp, args->tp,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 790) args->agbp, args->agno, XFS_BTNUM_BNO);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 791) if (!acur->bnogt)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 792) acur->bnogt = xfs_allocbt_init_cursor(args->mp, args->tp,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 793) args->agbp, args->agno, XFS_BTNUM_BNO);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 794) return i == 1 ? 0 : -ENOSPC;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 795) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 796)
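/*
 * Tear down the btree cursors held in the allocation cursor. The caller's
 * error state is passed through so each cursor is released appropriately,
 * and the cursor pointers are cleared so the structure can be reused.
 */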
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 797) static void
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 798) xfs_alloc_cur_close(
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 799) struct xfs_alloc_cur *acur,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 800) bool error)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 801) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 802) int cur_error = XFS_BTREE_NOERROR;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 803)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 804) if (error)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 805) cur_error = XFS_BTREE_ERROR;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 806)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 807) if (acur->cnt)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 808) xfs_btree_del_cursor(acur->cnt, cur_error);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 809) if (acur->bnolt)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 810) xfs_btree_del_cursor(acur->bnolt, cur_error);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 811) if (acur->bnogt)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 812) xfs_btree_del_cursor(acur->bnogt, cur_error);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 813) acur->cnt = acur->bnolt = acur->bnogt = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 814) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 815)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 816) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 817) * Check an extent for allocation and track the best available candidate in the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 818) * allocation structure. The cursor is deactivated if it has entered an out of
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 819) * range state based on allocation arguments. Optionally return the extent
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 820) * geometry and allocation status if requested by the caller.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 821) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 822) static int
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 823) xfs_alloc_cur_check(
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 824) struct xfs_alloc_arg *args,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 825) struct xfs_alloc_cur *acur,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 826) struct xfs_btree_cur *cur,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 827) int *new)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 828) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 829) int error, i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 830) xfs_agblock_t bno, bnoa, bnew;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 831) xfs_extlen_t len, lena, diff = -1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 832) bool busy;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 833) unsigned busy_gen = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 834) bool deactivate = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 835) bool isbnobt = cur->bc_btnum == XFS_BTNUM_BNO;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 836)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 837) *new = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 838)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 839) error = xfs_alloc_get_rec(cur, &bno, &len, &i);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 840) if (error)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 841) return error;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 842) if (XFS_IS_CORRUPT(args->mp, i != 1))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 843) return -EFSCORRUPTED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 844)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 845) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 846) * Check minlen and deactivate a cntbt cursor if out of acceptable size
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 847) * range (i.e., walking backwards looking for a minlen extent).
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 848) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 849) if (len < args->minlen) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 850) deactivate = !isbnobt;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 851) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 852) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 853)
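/*
 * Trim busy space out of the extent and compute the aligned bno/len. Track
 * busy state in the allocation cursor so the caller can flush and retry if
 * the search ultimately fails because of busy extents.
 */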
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 854) busy = xfs_alloc_compute_aligned(args, bno, len, &bnoa, &lena,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 855) &busy_gen);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 856) acur->busy |= busy;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 857) if (busy)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 858) acur->busy_gen = busy_gen;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 859) /* deactivate a bnobt cursor outside of locality range */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 860) if (bnoa < args->min_agbno || bnoa > args->max_agbno) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 861) deactivate = isbnobt;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 862) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 863) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 864) if (lena < args->minlen)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 865) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 866)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 867) args->len = XFS_EXTLEN_MIN(lena, args->maxlen);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 868) xfs_alloc_fix_len(args);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 869) ASSERT(args->len >= args->minlen);
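/* skip extents that are shorter than the best candidate found so far */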
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 870) if (args->len < acur->len)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 871) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 872)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 873) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 874) * We have an aligned record that satisfies minlen and beats or matches
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 875) * the candidate extent size. Compare locality for near allocation mode.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 876) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 877) ASSERT(args->type == XFS_ALLOCTYPE_NEAR_BNO);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 878) diff = xfs_alloc_compute_diff(args->agbno, args->len,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 879) args->alignment, args->datatype,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 880) bnoa, lena, &bnew);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 881) if (bnew == NULLAGBLOCK)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 882) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 883)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 884) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 885) * Deactivate a bnobt cursor with worse locality than the current best.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 886) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 887) if (diff > acur->diff) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 888) deactivate = isbnobt;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 889) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 890) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 891)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 892) ASSERT(args->len > acur->len ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 893) (args->len == acur->len && diff <= acur->diff));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 894) acur->rec_bno = bno;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 895) acur->rec_len = len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 896) acur->bno = bnew;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 897) acur->len = args->len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 898) acur->diff = diff;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 899) *new = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 900)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 901) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 902) * We're done if we found a perfect allocation. This only deactivates
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 903) * the current cursor, but this is just an optimization to terminate a
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 904) * cntbt search that otherwise runs to the edge of the tree.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 905) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 906) if (acur->diff == 0 && acur->len == args->maxlen)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 907) deactivate = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 908) out:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 909) if (deactivate)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 910) cur->bc_ag.abt.active = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 911) trace_xfs_alloc_cur_check(args->mp, cur->bc_btnum, bno, len, diff,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 912) *new);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 913) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 914) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 915)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 916) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 917) * Complete an allocation of a candidate extent. Remove the extent from both
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 918) * trees and update the args structure.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 919) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 920) STATIC int
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 921) xfs_alloc_cur_finish(
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 922) struct xfs_alloc_arg *args,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 923) struct xfs_alloc_cur *acur)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 924) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 925) struct xfs_agf __maybe_unused *agf = args->agbp->b_addr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 926) int error;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 927)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 928) ASSERT(acur->cnt && acur->bnolt);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 929) ASSERT(acur->bno >= acur->rec_bno);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 930) ASSERT(acur->bno + acur->len <= acur->rec_bno + acur->rec_len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 931) ASSERT(acur->rec_bno + acur->rec_len <= be32_to_cpu(agf->agf_length));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 932)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 933) error = xfs_alloc_fixup_trees(acur->cnt, acur->bnolt, acur->rec_bno,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 934) acur->rec_len, acur->bno, acur->len, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 935) if (error)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 936) return error;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 937)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 938) args->agbno = acur->bno;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 939) args->len = acur->len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 940) args->wasfromfl = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 941)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 942) trace_xfs_alloc_cur(args);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 943) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 944) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 945)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 946) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 947) * Locality allocation lookup algorithm. This expects a cntbt cursor and uses
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 948) * bno optimized lookup to search for extents with ideal size and locality.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 949) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 950) STATIC int
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 951) xfs_alloc_cntbt_iter(
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 952) struct xfs_alloc_arg *args,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 953) struct xfs_alloc_cur *acur)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 954) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 955) struct xfs_btree_cur *cur = acur->cnt;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 956) xfs_agblock_t bno;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 957) xfs_extlen_t len, cur_len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 958) int error;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 959) int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 960)
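/* nothing to do if the cntbt cursor has already been deactivated */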
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 961) if (!xfs_alloc_cur_active(cur))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 962) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 963)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 964) /* locality optimized lookup */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 965) cur_len = acur->cur_len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 966) error = xfs_alloc_lookup_ge(cur, args->agbno, cur_len, &i);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 967) if (error)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 968) return error;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 969) if (i == 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 970) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 971) error = xfs_alloc_get_rec(cur, &bno, &len, &i);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 972) if (error)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 973) return error;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 974)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 975) /* check the current record and update search length from it */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 976) error = xfs_alloc_cur_check(args, acur, cur, &i);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 977) if (error)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 978) return error;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 979) ASSERT(len >= acur->cur_len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 980) acur->cur_len = len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 981)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 982) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 983) * We looked up the first record >= [agbno, len] above. The agbno is a
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 984) * secondary key and so the current record may lie just before or after
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 985) * agbno. If it is past agbno, check the previous record too so long as
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 986) * the length matches as it may be closer. Don't check a smaller record
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 987) * because that could deactivate our cursor.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 988) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 989) if (bno > args->agbno) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 990) error = xfs_btree_decrement(cur, 0, &i);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 991) if (!error && i) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 992) error = xfs_alloc_get_rec(cur, &bno, &len, &i);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 993) if (!error && i && len == acur->cur_len)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 994) error = xfs_alloc_cur_check(args, acur, cur,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 995) &i);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 996) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 997) if (error)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 998) return error;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 999) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1000)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1001) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1002) * Bump the search key by a single block if we haven't found an allocation
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1003) * candidate yet or if the extent we just found is already at least double
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1004) * the previous search key. Otherwise, double the search key to speed up
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1005) * the search. Efficiency is more important here than absolute best locality.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1006) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1007) cur_len <<= 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1008) if (!acur->len || acur->cur_len >= cur_len)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1009) acur->cur_len++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1010) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1011) acur->cur_len = cur_len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1012)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1013) return error;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1014) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1015)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1016) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1017) * Deal with the case where only small freespaces remain. Either return the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1018) * contents of the last freespace record, or allocate space from the freelist if
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1019) * there is nothing in the tree.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1020) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1021) STATIC int /* error */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1022) xfs_alloc_ag_vextent_small(
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1023) struct xfs_alloc_arg *args, /* allocation argument structure */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1024) struct xfs_btree_cur *ccur, /* optional by-size cursor */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1025) xfs_agblock_t *fbnop, /* result block number */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1026) xfs_extlen_t *flenp, /* result length */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1027) int *stat) /* status: 0-freelist, 1-normal/none */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1028) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1029) struct xfs_agf *agf = args->agbp->b_addr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1030) int error = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1031) xfs_agblock_t fbno = NULLAGBLOCK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1032) xfs_extlen_t flen = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1033) int i = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1034)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1035) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1036) * If a cntbt cursor is provided, try to allocate the largest record in
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1037) * the tree. Try the AGFL if the cntbt is empty, otherwise fail the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1038) * allocation. Make sure to respect minleft even when pulling from the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1039) * freelist.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1040) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1041) if (ccur)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1042) error = xfs_btree_decrement(ccur, 0, &i);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1043) if (error)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1044) goto error;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1045) if (i) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1046) error = xfs_alloc_get_rec(ccur, &fbno, &flen, &i);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1047) if (error)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1048) goto error;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1049) if (XFS_IS_CORRUPT(args->mp, i != 1)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1050) error = -EFSCORRUPTED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1051) goto error;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1052) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1053) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1054) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1055)
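/*
 * Only a single block with no alignment requirement can come from the
 * AGFL. Don't use it for an AGFL-reserved allocation or if doing so would
 * leave fewer than minleft blocks on the freelist.
 */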
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1056) if (args->minlen != 1 || args->alignment != 1 ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1057) args->resv == XFS_AG_RESV_AGFL ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1058) be32_to_cpu(agf->agf_flcount) <= args->minleft)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1059) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1060)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1061) error = xfs_alloc_get_freelist(args->tp, args->agbp, &fbno, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1062) if (error)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1063) goto error;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1064) if (fbno == NULLAGBLOCK)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1065) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1066)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1067) xfs_extent_busy_reuse(args->mp, args->agno, fbno, 1,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1068) (args->datatype & XFS_ALLOC_NOBUSY));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1069)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1070) if (args->datatype & XFS_ALLOC_USERDATA) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1071) struct xfs_buf *bp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1072)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1073) error = xfs_trans_get_buf(args->tp, args->mp->m_ddev_targp,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1074) XFS_AGB_TO_DADDR(args->mp, args->agno, fbno),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1075) args->mp->m_bsize, 0, &bp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1076) if (error)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1077) goto error;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1078) xfs_trans_binval(args->tp, bp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1079) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1080) *fbnop = args->agbno = fbno;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1081) *flenp = args->len = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1082) if (XFS_IS_CORRUPT(args->mp, fbno >= be32_to_cpu(agf->agf_length))) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1083) error = -EFSCORRUPTED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1084) goto error;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1085) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1086) args->wasfromfl = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1087) trace_xfs_alloc_small_freelist(args);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1088)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1089) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1090) * If we're feeding an AGFL block to something that doesn't live in the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1091) * free space, we need to clear out the OWN_AG rmap.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1092) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1093) error = xfs_rmap_free(args->tp, args->agbp, args->agno, fbno, 1,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1094) &XFS_RMAP_OINFO_AG);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1095) if (error)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1096) goto error;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1097)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1098) *stat = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1099) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1100)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1101) out:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1102) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1103) * Can't do the allocation, give up.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1104) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1105) if (flen < args->minlen) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1106) args->agbno = NULLAGBLOCK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1107) trace_xfs_alloc_small_notenough(args);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1108) flen = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1109) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1110) *fbnop = fbno;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1111) *flenp = flen;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1112) *stat = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1113) trace_xfs_alloc_small_done(args);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1114) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1115)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1116) error:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1117) trace_xfs_alloc_small_error(args);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1118) return error;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1119) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1120)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1121) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1122) * Allocate a variable extent in the allocation group agno.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1123) * Type and bno are used to determine where in the allocation group the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1124) * extent will start.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1125) * Extent's length (returned in *len) will be between minlen and maxlen,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1126) * and of the form k * prod + mod unless there's nothing that large.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1127) * Return the starting a.g. block, or NULLAGBLOCK if we can't do it.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1128) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1129) STATIC int /* error */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1130) xfs_alloc_ag_vextent(
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1131) xfs_alloc_arg_t *args) /* argument structure for allocation */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1132) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1133) int error=0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1134)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1135) ASSERT(args->minlen > 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1136) ASSERT(args->maxlen > 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1137) ASSERT(args->minlen <= args->maxlen);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1138) ASSERT(args->mod < args->prod);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1139) ASSERT(args->alignment > 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1140)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1141) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1142) * Branch to correct routine based on the type.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1143) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1144) args->wasfromfl = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1145) switch (args->type) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1146) case XFS_ALLOCTYPE_THIS_AG:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1147) error = xfs_alloc_ag_vextent_size(args);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1148) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1149) case XFS_ALLOCTYPE_NEAR_BNO:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1150) error = xfs_alloc_ag_vextent_near(args);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1151) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1152) case XFS_ALLOCTYPE_THIS_BNO:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1153) error = xfs_alloc_ag_vextent_exact(args);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1154) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1155) default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1156) ASSERT(0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1157) /* NOTREACHED */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1158) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1159)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1160) if (error || args->agbno == NULLAGBLOCK)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1161) return error;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1162)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1163) ASSERT(args->len >= args->minlen);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1164) ASSERT(args->len <= args->maxlen);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1165) ASSERT(!args->wasfromfl || args->resv != XFS_AG_RESV_AGFL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1166) ASSERT(args->agbno % args->alignment == 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1167)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1168) /* if not file data, insert new block into the reverse map btree */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1169) if (!xfs_rmap_should_skip_owner_update(&args->oinfo)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1170) error = xfs_rmap_alloc(args->tp, args->agbp, args->agno,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1171) args->agbno, args->len, &args->oinfo);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1172) if (error)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1173) return error;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1174) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1175)
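/*
 * AGFL blocks are not accounted in the AGF free space counters, so only
 * update the counters for allocations taken from the free space btrees.
 */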
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1176) if (!args->wasfromfl) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1177) error = xfs_alloc_update_counters(args->tp, args->agbp,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1178) -((long)(args->len)));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1179) if (error)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1180) return error;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1181)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1182) ASSERT(!xfs_extent_busy_search(args->mp, args->agno,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1183) args->agbno, args->len));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1184) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1185)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1186) xfs_ag_resv_alloc_extent(args->pag, args->resv, args);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1187)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1188) XFS_STATS_INC(args->mp, xs_allocx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1189) XFS_STATS_ADD(args->mp, xs_allocb, args->len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1190) return error;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1191) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1192)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1193) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1194) * Allocate a variable extent at exactly agno/bno.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1195) * Extent's length (returned in *len) will be between minlen and maxlen,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1196) * and of the form k * prod + mod unless there's nothing that large.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1197) * Return the starting a.g. block (bno), or NULLAGBLOCK if we can't do it.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1198) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1199) STATIC int /* error */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1200) xfs_alloc_ag_vextent_exact(
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1201) xfs_alloc_arg_t *args) /* allocation argument structure */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1202) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1203) struct xfs_agf __maybe_unused *agf = args->agbp->b_addr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1204) xfs_btree_cur_t *bno_cur;/* by block-number btree cursor */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1205) xfs_btree_cur_t *cnt_cur;/* by count btree cursor */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1206) int error;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1207) xfs_agblock_t fbno; /* start block of found extent */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1208) xfs_extlen_t flen; /* length of found extent */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1209) xfs_agblock_t tbno; /* start block of busy extent */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1210) xfs_extlen_t tlen; /* length of busy extent */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1211) xfs_agblock_t tend; /* end block of busy extent */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1212) int i; /* success/failure of operation */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1213) unsigned busy_gen;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1214)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1215) ASSERT(args->alignment == 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1216)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1217) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1218) * Allocate/initialize a cursor for the by-number freespace btree.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1219) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1220) bno_cur = xfs_allocbt_init_cursor(args->mp, args->tp, args->agbp,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1221) args->agno, XFS_BTNUM_BNO);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1222)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1223) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1224) * Lookup bno and minlen in the btree (minlen is irrelevant, really).
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1225) * Look for the closest free block <= bno, it must contain bno
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1226) * if any free block does.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1227) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1228) error = xfs_alloc_lookup_le(bno_cur, args->agbno, args->minlen, &i);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1229) if (error)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1230) goto error0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1231) if (!i)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1232) goto not_found;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1233)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1234) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1235) * Grab the freespace record.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1236) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1237) error = xfs_alloc_get_rec(bno_cur, &fbno, &flen, &i);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1238) if (error)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1239) goto error0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1240) if (XFS_IS_CORRUPT(args->mp, i != 1)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1241) error = -EFSCORRUPTED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1242) goto error0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1243) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1244) ASSERT(fbno <= args->agbno);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1245)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1246) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1247) * Check for overlapping busy extents.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1248) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1249) tbno = fbno;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1250) tlen = flen;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1251) xfs_extent_busy_trim(args, &tbno, &tlen, &busy_gen);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1252)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1253) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1254) * Give up if the start of the extent is busy, or the freespace isn't
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1255) * long enough for the minimum request.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1256) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1257) if (tbno > args->agbno)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1258) goto not_found;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1259) if (tlen < args->minlen)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1260) goto not_found;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1261) tend = tbno + tlen;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1262) if (tend < args->agbno + args->minlen)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1263) goto not_found;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1264)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1265) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1266) * The end of the extent will be the smaller of the freespace end and the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1267) * maximal requested end.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1268) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1269) * Fix the length according to mod and prod if given.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1270) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1271) args->len = XFS_AGBLOCK_MIN(tend, args->agbno + args->maxlen)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1272) - args->agbno;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1273) xfs_alloc_fix_len(args);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1274) ASSERT(args->agbno + args->len <= tend);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1275)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1276) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1277) * We are allocating args->len blocks starting at agbno.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1278) * Allocate/initialize a cursor for the by-size btree.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1279) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1280) cnt_cur = xfs_allocbt_init_cursor(args->mp, args->tp, args->agbp,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1281) args->agno, XFS_BTNUM_CNT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1282) ASSERT(args->agbno + args->len <= be32_to_cpu(agf->agf_length));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1283) error = xfs_alloc_fixup_trees(cnt_cur, bno_cur, fbno, flen, args->agbno,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1284) args->len, XFSA_FIXUP_BNO_OK);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1285) if (error) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1286) xfs_btree_del_cursor(cnt_cur, XFS_BTREE_ERROR);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1287) goto error0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1288) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1289)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1290) xfs_btree_del_cursor(bno_cur, XFS_BTREE_NOERROR);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1291) xfs_btree_del_cursor(cnt_cur, XFS_BTREE_NOERROR);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1292)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1293) args->wasfromfl = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1294) trace_xfs_alloc_exact_done(args);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1295) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1296)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1297) not_found:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1298) /* Didn't find it, return null. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1299) xfs_btree_del_cursor(bno_cur, XFS_BTREE_NOERROR);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1300) args->agbno = NULLAGBLOCK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1301) trace_xfs_alloc_exact_notfound(args);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1302) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1303)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1304) error0:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1305) xfs_btree_del_cursor(bno_cur, XFS_BTREE_ERROR);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1306) trace_xfs_alloc_exact_error(args);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1307) return error;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1308) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1309)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1310) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1311) * Search a given number of btree records in a given direction. Check each
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1312) * record against the good extent we've already found.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1313) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1314) STATIC int
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1315) xfs_alloc_walk_iter(
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1316) struct xfs_alloc_arg *args,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1317) struct xfs_alloc_cur *acur,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1318) struct xfs_btree_cur *cur,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1319) bool increment,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1320) bool find_one, /* quit on first candidate */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1321) int count, /* rec count (-1 for infinite) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1322) int *stat)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1323) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1324) int error;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1325) int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1326)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1327) *stat = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1328)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1329) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1330) * Search so long as the cursor is active or we find a better extent.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1331) * The cursor is deactivated if it extends beyond the range of the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1332) * current allocation candidate.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1333) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1334) while (xfs_alloc_cur_active(cur) && count) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1335) error = xfs_alloc_cur_check(args, acur, cur, &i);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1336) if (error)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1337) return error;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1338) if (i == 1) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1339) *stat = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1340) if (find_one)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1341) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1342) }
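/* stop walking if the check deactivated the cursor */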
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1343) if (!xfs_alloc_cur_active(cur))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1344) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1345)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1346) if (increment)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1347) error = xfs_btree_increment(cur, 0, &i);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1348) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1349) error = xfs_btree_decrement(cur, 0, &i);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1350) if (error)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1351) return error;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1352) if (i == 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1353) cur->bc_ag.abt.active = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1354)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1355) if (count > 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1356) count--;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1357) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1358)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1359) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1360) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1361)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1362) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1363) * Search the by-bno and by-size btrees in parallel in search of an extent with
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1364) * ideal locality based on the NEAR mode ->agbno locality hint.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1365) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1366) STATIC int
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1367) xfs_alloc_ag_vextent_locality(
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1368) struct xfs_alloc_arg *args,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1369) struct xfs_alloc_cur *acur,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1370) int *stat)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1371) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1372) struct xfs_btree_cur *fbcur = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1373) int error;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1374) int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1375) bool fbinc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1376)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1377) ASSERT(acur->len == 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1378) ASSERT(args->type == XFS_ALLOCTYPE_NEAR_BNO);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1379)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1380) *stat = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1381)
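/*
 * Seed the cursors at the locality hint: the cntbt at the current search
 * length and the bnobt cursors immediately to the left and right of agbno.
 */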
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1382) error = xfs_alloc_lookup_ge(acur->cnt, args->agbno, acur->cur_len, &i);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1383) if (error)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1384) return error;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1385) error = xfs_alloc_lookup_le(acur->bnolt, args->agbno, 0, &i);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1386) if (error)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1387) return error;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1388) error = xfs_alloc_lookup_ge(acur->bnogt, args->agbno, 0, &i);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1389) if (error)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1390) return error;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1391)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1392) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1393) * Search the bnobt and cntbt in parallel. Search the bnobt left and
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1394) * right and lookup the closest extent to the locality hint for each
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1395) * extent size key in the cntbt. The entire search terminates
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1396) * immediately on a bnobt hit because that means we've found best case
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1397) * locality. Otherwise the search continues until the cntbt cursor runs
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1398) * off the end of the tree. If no allocation candidate is found at this
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1399) * point, give up on locality, walk backwards from the end of the cntbt
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1400) * and take the first available extent.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1401) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1402) * The parallel tree searches balance each other out to provide fairly
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1403) * consistent performance for various situations. The bnobt search can
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1404) * have pathological behavior in the worst case scenario of larger
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1405) * allocation requests and fragmented free space. On the other hand, the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1406) * bnobt is able to satisfy most smaller allocation requests much more
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1407) * quickly than the cntbt. The cntbt search can sift through fragmented
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1408) * free space and sets of free extents for larger allocation requests
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1409) * more quickly than the bnobt. Since the locality hint is just a hint
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1410) * and we don't want to scan the entire bnobt for perfect locality, the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1411) * cntbt search essentially bounds the bnobt search such that we can
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1412) * find good enough locality at reasonable performance in most cases.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1413) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1414) while (xfs_alloc_cur_active(acur->bnolt) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1415) xfs_alloc_cur_active(acur->bnogt) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1416) xfs_alloc_cur_active(acur->cnt)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1417)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1418) trace_xfs_alloc_cur_lookup(args);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1419)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1420) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1421) * Search the bnobt left and right. In the case of a hit, finish
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1422) * the search in the opposite direction and we're done.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1423) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1424) error = xfs_alloc_walk_iter(args, acur, acur->bnolt, false,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1425) true, 1, &i);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1426) if (error)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1427) return error;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1428) if (i == 1) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1429) trace_xfs_alloc_cur_left(args);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1430) fbcur = acur->bnogt;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1431) fbinc = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1432) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1433) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1434) error = xfs_alloc_walk_iter(args, acur, acur->bnogt, true, true,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1435) 1, &i);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1436) if (error)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1437) return error;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1438) if (i == 1) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1439) trace_xfs_alloc_cur_right(args);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1440) fbcur = acur->bnolt;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1441) fbinc = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1442) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1443) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1444)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1445) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1446) * Check the extent with best locality based on the current
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1447) * extent size search key and keep track of the best candidate.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1448) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1449) error = xfs_alloc_cntbt_iter(args, acur);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1450) if (error)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1451) return error;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1452) if (!xfs_alloc_cur_active(acur->cnt)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1453) trace_xfs_alloc_cur_lookup_done(args);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1454) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1455) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1456) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1457)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1458) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1459) * If we failed to find anything due to busy extents, return empty
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1460) * handed so the caller can flush and retry. If no busy extents were
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1461) * found, walk backwards from the end of the cntbt as a last resort.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1462) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1463) if (!xfs_alloc_cur_active(acur->cnt) && !acur->len && !acur->busy) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1464) error = xfs_btree_decrement(acur->cnt, 0, &i);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1465) if (error)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1466) return error;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1467) if (i) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1468) acur->cnt->bc_ag.abt.active = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1469) fbcur = acur->cnt;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1470) fbinc = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1471) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1472) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1473)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1474) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1475) * Search in the opposite direction for a better entry in the case of
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1476) * a bnobt hit or walk backwards from the end of the cntbt.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1477) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1478) if (fbcur) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1479) error = xfs_alloc_walk_iter(args, acur, fbcur, fbinc, true, -1,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1480) &i);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1481) if (error)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1482) return error;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1483) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1484)
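/* we found a candidate extent if the allocation cursor has a length set */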
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1485) if (acur->len)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1486) *stat = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1487)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1488) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1489) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1490)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1491) /* Check the last block of the cnt btree for allocations. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1492) static int
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1493) xfs_alloc_ag_vextent_lastblock(
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1494) struct xfs_alloc_arg *args,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1495) struct xfs_alloc_cur *acur,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1496) xfs_agblock_t *bno,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1497) xfs_extlen_t *len,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1498) bool *allocated)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1499) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1500) int error;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1501) int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1502)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1503) #ifdef DEBUG
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1504) /* Randomly don't execute the first algorithm. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1505) if (prandom_u32() & 1)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1506) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1507) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1508)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1509) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1510) * Start from the entry that lookup found, sequence through all larger
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1511) * free blocks. If we're actually pointing at a record smaller than
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1512) * maxlen, go to the start of this block, and skip all those smaller
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1513) * than minlen.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1514) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1515) if (*len || args->alignment > 1) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1516) acur->cnt->bc_ptrs[0] = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1517) do {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1518) error = xfs_alloc_get_rec(acur->cnt, bno, len, &i);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1519) if (error)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1520) return error;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1521) if (XFS_IS_CORRUPT(args->mp, i != 1))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1522) return -EFSCORRUPTED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1523) if (*len >= args->minlen)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1524) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1525) error = xfs_btree_increment(acur->cnt, 0, &i);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1526) if (error)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1527) return error;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1528) } while (i);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1529) ASSERT(*len >= args->minlen);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1530) if (!i)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1531) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1532) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1533)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1534) error = xfs_alloc_walk_iter(args, acur, acur->cnt, true, false, -1, &i);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1535) if (error)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1536) return error;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1537)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1538) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1539) * It didn't work. There could still be a suitable record elsewhere in the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1540) * tree, so return and let the caller fall back to the locality search.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1541) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1542) if (acur->len == 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1543) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1544)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1545) trace_xfs_alloc_near_first(args);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1546) *allocated = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1547) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1548) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1549)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1550) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1551) * Allocate a variable extent near bno in the allocation group agno.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1552) * Extent's length (returned in len) will be between minlen and maxlen,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1553) * and of the form k * prod + mod unless there's nothing that large.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1554) * Return the starting a.g. block, or NULLAGBLOCK if we can't do it.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1555) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1556) STATIC int
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1557) xfs_alloc_ag_vextent_near(
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1558) struct xfs_alloc_arg *args)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1559) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1560) struct xfs_alloc_cur acur = {};
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1561) int error; /* error code */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1562) int i; /* result code, temporary */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1563) xfs_agblock_t bno;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1564) xfs_extlen_t len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1565)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1566) /* handle uninitialized agbno range so caller doesn't have to */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1567) if (!args->min_agbno && !args->max_agbno)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1568) args->max_agbno = args->mp->m_sb.sb_agblocks - 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1569) ASSERT(args->min_agbno <= args->max_agbno);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1570)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1571) /* clamp agbno to the range if it's outside */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1572) if (args->agbno < args->min_agbno)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1573) args->agbno = args->min_agbno;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1574) if (args->agbno > args->max_agbno)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1575) args->agbno = args->max_agbno;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1576)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1577) restart:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1578) len = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1579)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1580) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1581) * Set up cursors and see if there are any free extents as big as
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1582) * maxlen. If not, pick the last entry in the tree unless the tree is
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1583) * empty.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1584) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1585) error = xfs_alloc_cur_setup(args, &acur);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1586) if (error == -ENOSPC) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1587) error = xfs_alloc_ag_vextent_small(args, acur.cnt, &bno,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1588) &len, &i);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1589) if (error)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1590) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1591) if (i == 0 || len == 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1592) trace_xfs_alloc_near_noentry(args);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1593) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1594) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1595) ASSERT(i == 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1596) } else if (error) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1597) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1598) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1599)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1600) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1601) * First algorithm.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1602) * If the requested extent is large wrt the freespaces available
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1603) * in this a.g., then the cursor will be pointing to a btree entry
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1604) * near the right edge of the tree. If it's in the last btree leaf
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1605) * block, then we just examine all the entries in that block
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1606) * that are big enough, and pick the best one.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1607) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1608) if (xfs_btree_islastblock(acur.cnt, 0)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1609) bool allocated = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1610)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1611) error = xfs_alloc_ag_vextent_lastblock(args, &acur, &bno, &len,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1612) &allocated);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1613) if (error)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1614) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1615) if (allocated)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1616) goto alloc_finish;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1617) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1618)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1619) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1620) * Second algorithm. Combined cntbt and bnobt search to find ideal
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1621) * locality.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1622) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1623) error = xfs_alloc_ag_vextent_locality(args, &acur, &i);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1624) if (error)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1625) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1626)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1627) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1628) 	 * If we couldn't get anything, flush busy extents and retry, else give up.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1629) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1630) if (!acur.len) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1631) if (acur.busy) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1632) trace_xfs_alloc_near_busy(args);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1633) xfs_extent_busy_flush(args->mp, args->pag,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1634) acur.busy_gen);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1635) goto restart;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1636) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1637) trace_xfs_alloc_size_neither(args);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1638) args->agbno = NULLAGBLOCK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1639) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1640) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1641)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1642) alloc_finish:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1643) /* fix up btrees on a successful allocation */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1644) error = xfs_alloc_cur_finish(args, &acur);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1645)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1646) out:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1647) xfs_alloc_cur_close(&acur, error);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1648) return error;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1649) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1650)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1651) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1652) * Allocate a variable extent anywhere in the allocation group agno.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1653) * Extent's length (returned in len) will be between minlen and maxlen,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1654) * and of the form k * prod + mod unless there's nothing that large.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1655) * Return the starting a.g. block, or NULLAGBLOCK if we can't do it.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1656) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1657) STATIC int /* error */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1658) xfs_alloc_ag_vextent_size(
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1659) xfs_alloc_arg_t *args) /* allocation argument structure */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1660) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1661) struct xfs_agf *agf = args->agbp->b_addr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1662) xfs_btree_cur_t *bno_cur; /* cursor for bno btree */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1663) xfs_btree_cur_t *cnt_cur; /* cursor for cnt btree */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1664) int error; /* error result */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1665) xfs_agblock_t fbno; /* start of found freespace */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1666) xfs_extlen_t flen; /* length of found freespace */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1667) int i; /* temp status variable */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1668) xfs_agblock_t rbno; /* returned block number */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1669) xfs_extlen_t rlen; /* length of returned extent */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1670) bool busy;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1671) unsigned busy_gen;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1672)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1673) restart:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1674) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1675) * Allocate and initialize a cursor for the by-size btree.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1676) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1677) cnt_cur = xfs_allocbt_init_cursor(args->mp, args->tp, args->agbp,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1678) args->agno, XFS_BTNUM_CNT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1679) bno_cur = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1680) busy = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1681)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1682) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1683) * Look for an entry >= maxlen+alignment-1 blocks.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1684) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1685) if ((error = xfs_alloc_lookup_ge(cnt_cur, 0,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1686) args->maxlen + args->alignment - 1, &i)))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1687) goto error0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1688)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1689) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1690) 	 * If none, then we have to settle for a smaller extent. In the case that
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1691) * there are no large extents, this will return the last entry in the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1692) * tree unless the tree is empty. In the case that there are only busy
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1693) * large extents, this will return the largest small extent unless there
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1694) * are no smaller extents available.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1695) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1696) if (!i) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1697) error = xfs_alloc_ag_vextent_small(args, cnt_cur,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1698) &fbno, &flen, &i);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1699) if (error)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1700) goto error0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1701) if (i == 0 || flen == 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1702) xfs_btree_del_cursor(cnt_cur, XFS_BTREE_NOERROR);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1703) trace_xfs_alloc_size_noentry(args);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1704) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1705) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1706) ASSERT(i == 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1707) busy = xfs_alloc_compute_aligned(args, fbno, flen, &rbno,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1708) &rlen, &busy_gen);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1709) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1710) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1711) * Search for a non-busy extent that is large enough.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1712) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1713) for (;;) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1714) error = xfs_alloc_get_rec(cnt_cur, &fbno, &flen, &i);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1715) if (error)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1716) goto error0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1717) if (XFS_IS_CORRUPT(args->mp, i != 1)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1718) error = -EFSCORRUPTED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1719) goto error0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1720) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1721)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1722) busy = xfs_alloc_compute_aligned(args, fbno, flen,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1723) &rbno, &rlen, &busy_gen);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1724)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1725) if (rlen >= args->maxlen)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1726) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1727)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1728) error = xfs_btree_increment(cnt_cur, 0, &i);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1729) if (error)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1730) goto error0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1731) if (i == 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1732) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1733) * Our only valid extents must have been busy.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1734) 				 * Make them unbusy by forcing the log out and
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1735) * retrying.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1736) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1737) xfs_btree_del_cursor(cnt_cur,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1738) XFS_BTREE_NOERROR);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1739) trace_xfs_alloc_size_busy(args);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1740) xfs_extent_busy_flush(args->mp,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1741) args->pag, busy_gen);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1742) goto restart;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1743) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1744) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1745) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1746)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1747) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1748) * In the first case above, we got the last entry in the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1749) * by-size btree. Now we check to see if the space hits maxlen
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1750) * once aligned; if not, we search left for something better.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1751) * This can't happen in the second case above.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1752) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1753) rlen = XFS_EXTLEN_MIN(args->maxlen, rlen);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1754) if (XFS_IS_CORRUPT(args->mp,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1755) rlen != 0 &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1756) (rlen > flen ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1757) rbno + rlen > fbno + flen))) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1758) error = -EFSCORRUPTED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1759) goto error0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1760) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1761) if (rlen < args->maxlen) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1762) xfs_agblock_t bestfbno;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1763) xfs_extlen_t bestflen;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1764) xfs_agblock_t bestrbno;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1765) xfs_extlen_t bestrlen;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1766)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1767) bestrlen = rlen;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1768) bestrbno = rbno;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1769) bestflen = flen;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1770) bestfbno = fbno;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1771) for (;;) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1772) if ((error = xfs_btree_decrement(cnt_cur, 0, &i)))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1773) goto error0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1774) if (i == 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1775) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1776) if ((error = xfs_alloc_get_rec(cnt_cur, &fbno, &flen,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1777) &i)))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1778) goto error0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1779) if (XFS_IS_CORRUPT(args->mp, i != 1)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1780) error = -EFSCORRUPTED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1781) goto error0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1782) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1783) if (flen < bestrlen)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1784) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1785) busy = xfs_alloc_compute_aligned(args, fbno, flen,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1786) &rbno, &rlen, &busy_gen);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1787) rlen = XFS_EXTLEN_MIN(args->maxlen, rlen);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1788) if (XFS_IS_CORRUPT(args->mp,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1789) rlen != 0 &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1790) (rlen > flen ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1791) rbno + rlen > fbno + flen))) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1792) error = -EFSCORRUPTED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1793) goto error0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1794) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1795) if (rlen > bestrlen) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1796) bestrlen = rlen;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1797) bestrbno = rbno;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1798) bestflen = flen;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1799) bestfbno = fbno;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1800) if (rlen == args->maxlen)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1801) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1802) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1803) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1804) if ((error = xfs_alloc_lookup_eq(cnt_cur, bestfbno, bestflen,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1805) &i)))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1806) goto error0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1807) if (XFS_IS_CORRUPT(args->mp, i != 1)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1808) error = -EFSCORRUPTED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1809) goto error0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1810) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1811) rlen = bestrlen;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1812) rbno = bestrbno;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1813) flen = bestflen;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1814) fbno = bestfbno;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1815) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1816) args->wasfromfl = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1817) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1818) * Fix up the length.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1819) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1820) args->len = rlen;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1821) if (rlen < args->minlen) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1822) if (busy) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1823) xfs_btree_del_cursor(cnt_cur, XFS_BTREE_NOERROR);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1824) trace_xfs_alloc_size_busy(args);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1825) xfs_extent_busy_flush(args->mp, args->pag, busy_gen);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1826) goto restart;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1827) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1828) goto out_nominleft;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1829) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1830) xfs_alloc_fix_len(args);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1831)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1832) rlen = args->len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1833) if (XFS_IS_CORRUPT(args->mp, rlen > flen)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1834) error = -EFSCORRUPTED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1835) goto error0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1836) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1837) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1838) * Allocate and initialize a cursor for the by-block tree.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1839) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1840) bno_cur = xfs_allocbt_init_cursor(args->mp, args->tp, args->agbp,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1841) args->agno, XFS_BTNUM_BNO);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1842) if ((error = xfs_alloc_fixup_trees(cnt_cur, bno_cur, fbno, flen,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1843) rbno, rlen, XFSA_FIXUP_CNT_OK)))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1844) goto error0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1845) xfs_btree_del_cursor(cnt_cur, XFS_BTREE_NOERROR);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1846) xfs_btree_del_cursor(bno_cur, XFS_BTREE_NOERROR);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1847) cnt_cur = bno_cur = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1848) args->len = rlen;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1849) args->agbno = rbno;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1850) if (XFS_IS_CORRUPT(args->mp,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1851) args->agbno + args->len >
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1852) be32_to_cpu(agf->agf_length))) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1853) error = -EFSCORRUPTED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1854) goto error0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1855) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1856) trace_xfs_alloc_size_done(args);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1857) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1858)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1859) error0:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1860) trace_xfs_alloc_size_error(args);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1861) if (cnt_cur)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1862) xfs_btree_del_cursor(cnt_cur, XFS_BTREE_ERROR);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1863) if (bno_cur)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1864) xfs_btree_del_cursor(bno_cur, XFS_BTREE_ERROR);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1865) return error;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1866)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1867) out_nominleft:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1868) xfs_btree_del_cursor(cnt_cur, XFS_BTREE_NOERROR);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1869) trace_xfs_alloc_size_nominleft(args);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1870) args->agbno = NULLAGBLOCK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1871) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1872) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1873)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1874) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1875) * Free the extent starting at agno/bno for length.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1876) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1877) STATIC int
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1878) xfs_free_ag_extent(
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1879) struct xfs_trans *tp,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1880) struct xfs_buf *agbp,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1881) xfs_agnumber_t agno,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1882) xfs_agblock_t bno,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1883) xfs_extlen_t len,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1884) const struct xfs_owner_info *oinfo,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1885) enum xfs_ag_resv_type type)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1886) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1887) struct xfs_mount *mp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1888) struct xfs_btree_cur *bno_cur;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1889) struct xfs_btree_cur *cnt_cur;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1890) xfs_agblock_t gtbno; /* start of right neighbor */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1891) xfs_extlen_t gtlen; /* length of right neighbor */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1892) xfs_agblock_t ltbno; /* start of left neighbor */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1893) xfs_extlen_t ltlen; /* length of left neighbor */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1894) xfs_agblock_t nbno; /* new starting block of freesp */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1895) xfs_extlen_t nlen; /* new length of freespace */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1896) int haveleft; /* have a left neighbor */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1897) int haveright; /* have a right neighbor */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1898) int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1899) int error;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1900)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1901) bno_cur = cnt_cur = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1902) mp = tp->t_mountp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1903)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1904) if (!xfs_rmap_should_skip_owner_update(oinfo)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1905) error = xfs_rmap_free(tp, agbp, agno, bno, len, oinfo);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1906) if (error)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1907) goto error0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1908) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1909)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1910) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1911) * Allocate and initialize a cursor for the by-block btree.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1912) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1913) bno_cur = xfs_allocbt_init_cursor(mp, tp, agbp, agno, XFS_BTNUM_BNO);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1914) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1915) * Look for a neighboring block on the left (lower block numbers)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1916) * that is contiguous with this space.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1917) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1918) if ((error = xfs_alloc_lookup_le(bno_cur, bno, len, &haveleft)))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1919) goto error0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1920) if (haveleft) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1921) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1922) * There is a block to our left.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1923) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1924) 		if ((error = xfs_alloc_get_rec(bno_cur, &ltbno, &ltlen, &i)))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1925) goto error0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1926) if (XFS_IS_CORRUPT(mp, i != 1)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1927) error = -EFSCORRUPTED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1928) goto error0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1929) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1930) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1931) * It's not contiguous, though.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1932) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1933) if (ltbno + ltlen < bno)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1934) haveleft = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1935) else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1936) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1937) 			 * If this failure happens, the request to free
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1938) 			 * this space was invalid: it's (partly) already free.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1939) * Very bad.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1940) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1941) if (XFS_IS_CORRUPT(mp, ltbno + ltlen > bno)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1942) error = -EFSCORRUPTED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1943) goto error0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1944) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1945) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1946) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1947) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1948) * Look for a neighboring block on the right (higher block numbers)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1949) * that is contiguous with this space.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1950) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1951) if ((error = xfs_btree_increment(bno_cur, 0, &haveright)))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1952) goto error0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1953) if (haveright) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1954) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1955) * There is a block to our right.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1956) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1957) 		if ((error = xfs_alloc_get_rec(bno_cur, &gtbno, &gtlen, &i)))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1958) goto error0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1959) if (XFS_IS_CORRUPT(mp, i != 1)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1960) error = -EFSCORRUPTED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1961) goto error0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1962) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1963) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1964) * It's not contiguous, though.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1965) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1966) if (bno + len < gtbno)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1967) haveright = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1968) else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1969) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1970) 			 * If this failure happens, the request to free
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1971) 			 * this space was invalid: it's (partly) already free.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1972) * Very bad.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1973) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1974) if (XFS_IS_CORRUPT(mp, bno + len > gtbno)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1975) error = -EFSCORRUPTED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1976) goto error0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1977) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1978) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1979) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1980) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1981) * Now allocate and initialize a cursor for the by-size tree.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1982) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1983) cnt_cur = xfs_allocbt_init_cursor(mp, tp, agbp, agno, XFS_BTNUM_CNT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1984) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1985) * Have both left and right contiguous neighbors.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1986) * Merge all three into a single free block.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1987) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1988) if (haveleft && haveright) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1989) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1990) * Delete the old by-size entry on the left.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1991) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1992) if ((error = xfs_alloc_lookup_eq(cnt_cur, ltbno, ltlen, &i)))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1993) goto error0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1994) if (XFS_IS_CORRUPT(mp, i != 1)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1995) error = -EFSCORRUPTED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1996) goto error0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1997) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1998) if ((error = xfs_btree_delete(cnt_cur, &i)))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1999) goto error0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2000) if (XFS_IS_CORRUPT(mp, i != 1)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2001) error = -EFSCORRUPTED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2002) goto error0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2003) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2004) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2005) * Delete the old by-size entry on the right.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2006) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2007) if ((error = xfs_alloc_lookup_eq(cnt_cur, gtbno, gtlen, &i)))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2008) goto error0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2009) if (XFS_IS_CORRUPT(mp, i != 1)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2010) error = -EFSCORRUPTED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2011) goto error0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2012) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2013) if ((error = xfs_btree_delete(cnt_cur, &i)))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2014) goto error0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2015) if (XFS_IS_CORRUPT(mp, i != 1)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2016) error = -EFSCORRUPTED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2017) goto error0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2018) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2019) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2020) * Delete the old by-block entry for the right block.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2021) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2022) if ((error = xfs_btree_delete(bno_cur, &i)))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2023) goto error0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2024) if (XFS_IS_CORRUPT(mp, i != 1)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2025) error = -EFSCORRUPTED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2026) goto error0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2027) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2028) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2029) * Move the by-block cursor back to the left neighbor.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2030) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2031) if ((error = xfs_btree_decrement(bno_cur, 0, &i)))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2032) goto error0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2033) if (XFS_IS_CORRUPT(mp, i != 1)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2034) error = -EFSCORRUPTED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2035) goto error0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2036) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2037) #ifdef DEBUG
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2038) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2039) * Check that this is the right record: delete didn't
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2040) * mangle the cursor.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2041) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2042) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2043) xfs_agblock_t xxbno;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2044) xfs_extlen_t xxlen;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2045)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2046) if ((error = xfs_alloc_get_rec(bno_cur, &xxbno, &xxlen,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2047) &i)))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2048) goto error0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2049) if (XFS_IS_CORRUPT(mp,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2050) i != 1 ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2051) xxbno != ltbno ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2052) xxlen != ltlen)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2053) error = -EFSCORRUPTED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2054) goto error0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2055) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2056) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2057) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2058) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2059) * Update remaining by-block entry to the new, joined block.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2060) */
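		/*
		 * For example (hypothetical numbers): freeing [50, 10]
		 * between a left neighbor [40, 10] and a right neighbor
		 * [60, 20] leaves one record covering [40, 40].
		 */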
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2061) nbno = ltbno;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2062) nlen = len + ltlen + gtlen;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2063) if ((error = xfs_alloc_update(bno_cur, nbno, nlen)))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2064) goto error0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2065) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2066) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2067) * Have only a left contiguous neighbor.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2068) * Merge it together with the new freespace.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2069) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2070) else if (haveleft) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2071) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2072) * Delete the old by-size entry on the left.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2073) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2074) if ((error = xfs_alloc_lookup_eq(cnt_cur, ltbno, ltlen, &i)))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2075) goto error0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2076) if (XFS_IS_CORRUPT(mp, i != 1)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2077) error = -EFSCORRUPTED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2078) goto error0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2079) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2080) if ((error = xfs_btree_delete(cnt_cur, &i)))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2081) goto error0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2082) if (XFS_IS_CORRUPT(mp, i != 1)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2083) error = -EFSCORRUPTED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2084) goto error0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2085) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2086) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2087) * Back up the by-block cursor to the left neighbor, and
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2088) * update its length.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2089) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2090) if ((error = xfs_btree_decrement(bno_cur, 0, &i)))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2091) goto error0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2092) if (XFS_IS_CORRUPT(mp, i != 1)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2093) error = -EFSCORRUPTED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2094) goto error0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2095) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2096) nbno = ltbno;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2097) nlen = len + ltlen;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2098) if ((error = xfs_alloc_update(bno_cur, nbno, nlen)))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2099) goto error0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2100) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2101) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2102) * Have only a right contiguous neighbor.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2103) * Merge it together with the new freespace.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2104) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2105) else if (haveright) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2106) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2107) * Delete the old by-size entry on the right.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2108) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2109) if ((error = xfs_alloc_lookup_eq(cnt_cur, gtbno, gtlen, &i)))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2110) goto error0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2111) if (XFS_IS_CORRUPT(mp, i != 1)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2112) error = -EFSCORRUPTED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2113) goto error0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2114) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2115) if ((error = xfs_btree_delete(cnt_cur, &i)))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2116) goto error0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2117) if (XFS_IS_CORRUPT(mp, i != 1)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2118) error = -EFSCORRUPTED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2119) goto error0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2120) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2121) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2122) * Update the starting block and length of the right
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2123) * neighbor in the by-block tree.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2124) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2125) nbno = bno;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2126) nlen = len + gtlen;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2127) if ((error = xfs_alloc_update(bno_cur, nbno, nlen)))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2128) goto error0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2129) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2130) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2131) * No contiguous neighbors.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2132) * Insert the new freespace into the by-block tree.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2133) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2134) else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2135) nbno = bno;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2136) nlen = len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2137) if ((error = xfs_btree_insert(bno_cur, &i)))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2138) goto error0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2139) if (XFS_IS_CORRUPT(mp, i != 1)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2140) error = -EFSCORRUPTED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2141) goto error0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2142) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2143) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2144) xfs_btree_del_cursor(bno_cur, XFS_BTREE_NOERROR);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2145) bno_cur = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2146) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2147) * In all cases we need to insert the new freespace in the by-size tree.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2148) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2149) if ((error = xfs_alloc_lookup_eq(cnt_cur, nbno, nlen, &i)))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2150) goto error0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2151) if (XFS_IS_CORRUPT(mp, i != 0)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2152) error = -EFSCORRUPTED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2153) goto error0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2154) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2155) if ((error = xfs_btree_insert(cnt_cur, &i)))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2156) goto error0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2157) if (XFS_IS_CORRUPT(mp, i != 1)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2158) error = -EFSCORRUPTED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2159) goto error0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2160) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2161) xfs_btree_del_cursor(cnt_cur, XFS_BTREE_NOERROR);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2162) cnt_cur = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2163)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2164) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2165) * Update the freespace totals in the ag and superblock.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2166) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2167) error = xfs_alloc_update_counters(tp, agbp, len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2168) xfs_ag_resv_free_extent(agbp->b_pag, type, tp, len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2169) if (error)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2170) goto error0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2171)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2172) XFS_STATS_INC(mp, xs_freex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2173) XFS_STATS_ADD(mp, xs_freeb, len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2174)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2175) trace_xfs_free_extent(mp, agno, bno, len, type, haveleft, haveright);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2176)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2177) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2178)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2179) error0:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2180) trace_xfs_free_extent(mp, agno, bno, len, type, -1, -1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2181) if (bno_cur)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2182) xfs_btree_del_cursor(bno_cur, XFS_BTREE_ERROR);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2183) if (cnt_cur)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2184) xfs_btree_del_cursor(cnt_cur, XFS_BTREE_ERROR);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2185) return error;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2186) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2187)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2188) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2189) * Visible (exported) allocation/free functions.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2190) * Some of these are used just by xfs_alloc_btree.c and this file.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2191) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2192)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2193) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2194) * Compute and fill in value of m_ag_maxlevels.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2195) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2196) void
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2197) xfs_alloc_compute_maxlevels(
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2198) xfs_mount_t *mp) /* file system mount structure */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2199) {
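	/*
	 * The worst case for the free space btrees is one record for every
	 * other block in the AG (free and allocated blocks alternating),
	 * hence the (sb_agblocks + 1) / 2 record count below.
	 */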
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2200) mp->m_ag_maxlevels = xfs_btree_compute_maxlevels(mp->m_alloc_mnr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2201) (mp->m_sb.sb_agblocks + 1) / 2);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2202) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2203)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2204) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2205) * Find the length of the longest extent in an AG. The 'need' parameter
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2206) * specifies how much space we're going to need for the AGFL and the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2207) * 'reserved' parameter tells us how many blocks in this AG are reserved for
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2208) * other callers.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2209) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2210) xfs_extlen_t
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2211) xfs_alloc_longest_free_extent(
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2212) struct xfs_perag *pag,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2213) xfs_extlen_t need,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2214) xfs_extlen_t reserved)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2215) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2216) xfs_extlen_t delta = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2217)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2218) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2219) * If the AGFL needs a recharge, we'll have to subtract that from the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2220) * longest extent.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2221) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2222) if (need > pag->pagf_flcount)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2223) delta = need - pag->pagf_flcount;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2224)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2225) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2226) * If we cannot maintain others' reservations with space from the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2227) * not-longest freesp extents, we'll have to subtract /that/ from
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2228) * the longest extent too.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2229) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2230) if (pag->pagf_freeblks - pag->pagf_longest < reserved)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2231) delta += reserved - (pag->pagf_freeblks - pag->pagf_longest);
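	/*
	 * Worked example with hypothetical numbers: pagf_longest = 100,
	 * pagf_freeblks = 150, pagf_flcount = 4, need = 10, reserved = 60.
	 * Refilling the AGFL costs 10 - 4 = 6 blocks and the non-longest
	 * free space (150 - 100 = 50) falls 10 blocks short of the
	 * reservation, so delta = 16 and the usable longest extent is
	 * 100 - 16 = 84 blocks (further capped by m_ag_max_usable).
	 */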
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2232)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2233) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2234) * If the longest extent is long enough to satisfy all the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2235) * reservations and AGFL rules in place, we can return this extent.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2236) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2237) if (pag->pagf_longest > delta)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2238) return min_t(xfs_extlen_t, pag->pag_mount->m_ag_max_usable,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2239) pag->pagf_longest - delta);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2240)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2241) /* Otherwise, let the caller try for 1 block if there's space. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2242) return pag->pagf_flcount > 0 || pag->pagf_longest > 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2243) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2244)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2245) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2246) * Compute the minimum length of the AGFL in the given AG. If @pag is NULL,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2247) * return the largest possible minimum length.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2248) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2249) unsigned int
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2250) xfs_alloc_min_freelist(
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2251) struct xfs_mount *mp,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2252) struct xfs_perag *pag)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2253) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2254) /* AG btrees have at least 1 level. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2255) static const uint8_t fake_levels[XFS_BTNUM_AGF] = {1, 1, 1};
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2256) const uint8_t *levels = pag ? pag->pagf_levels : fake_levels;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2257) unsigned int min_free;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2258)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2259) ASSERT(mp->m_ag_maxlevels > 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2260)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2261) /* space needed by-bno freespace btree */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2262) min_free = min_t(unsigned int, levels[XFS_BTNUM_BNOi] + 1,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2263) mp->m_ag_maxlevels);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2264) /* space needed by-size freespace btree */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2265) min_free += min_t(unsigned int, levels[XFS_BTNUM_CNTi] + 1,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2266) mp->m_ag_maxlevels);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2267) /* space needed reverse mapping used space btree */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2268) if (xfs_sb_version_hasrmapbt(&mp->m_sb))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2269) min_free += min_t(unsigned int, levels[XFS_BTNUM_RMAPi] + 1,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2270) mp->m_rmap_maxlevels);
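	/*
	 * Worked example with hypothetical numbers: 2-level bnobt and cntbt
	 * trees plus a 3-level rmapbt (with sufficiently large maxlevels
	 * limits) give min_free = (2 + 1) + (2 + 1) + (3 + 1) = 10 blocks,
	 * i.e. room for a block per level plus a possible new root in each
	 * tree.
	 */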
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2271)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2272) return min_free;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2273) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2274)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2275) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2276) * Check if the operation we are fixing up the freelist for should go ahead or
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2277)  * not. If we are freeing blocks, we always allow it; otherwise the allocation
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2278)  * depends on whether the size and shape of the available free space permit
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2279)  * the requested allocation to take place.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2280) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2281) static bool
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2282) xfs_alloc_space_available(
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2283) struct xfs_alloc_arg *args,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2284) xfs_extlen_t min_free,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2285) int flags)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2286) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2287) struct xfs_perag *pag = args->pag;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2288) xfs_extlen_t alloc_len, longest;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2289) xfs_extlen_t reservation; /* blocks that are still reserved */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2290) int available;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2291) xfs_extlen_t agflcount;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2292)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2293) if (flags & XFS_ALLOC_FLAG_FREEING)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2294) return true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2295)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2296) reservation = xfs_ag_resv_needed(pag, args->resv);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2297)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2298) /* do we have enough contiguous free space for the allocation? */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2299) alloc_len = args->minlen + (args->alignment - 1) + args->minalignslop;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2300) longest = xfs_alloc_longest_free_extent(pag, min_free, reservation);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2301) if (longest < alloc_len)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2302) return false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2303)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2304) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2305) * Do we have enough free space remaining for the allocation? Don't
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2306) 	 * account extra agfl blocks because we are about to defer freeing them,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2307) * making them unavailable until the current transaction commits.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2308) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2309) agflcount = min_t(xfs_extlen_t, pag->pagf_flcount, min_free);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2310) available = (int)(pag->pagf_freeblks + agflcount -
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2311) reservation - min_free - args->minleft);
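	/*
	 * Worked example with hypothetical numbers: pagf_freeblks = 1000,
	 * pagf_flcount = 6, min_free = 10, reservation = 100 and
	 * minleft = 50 give agflcount = 6 and
	 * available = 1000 + 6 - 100 - 10 - 50 = 846 blocks.
	 */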
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2312) if (available < (int)max(args->total, alloc_len))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2313) return false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2314)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2315) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2316) * Clamp maxlen to the amount of free space available for the actual
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2317) * extent allocation.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2318) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2319) if (available < (int)args->maxlen && !(flags & XFS_ALLOC_FLAG_CHECK)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2320) args->maxlen = available;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2321) ASSERT(args->maxlen > 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2322) ASSERT(args->maxlen >= args->minlen);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2323) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2324)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2325) return true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2326) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2327)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2328) int
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2329) xfs_free_agfl_block(
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2330) struct xfs_trans *tp,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2331) xfs_agnumber_t agno,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2332) xfs_agblock_t agbno,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2333) struct xfs_buf *agbp,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2334) struct xfs_owner_info *oinfo)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2335) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2336) int error;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2337) struct xfs_buf *bp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2338)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2339) error = xfs_free_ag_extent(tp, agbp, agno, agbno, 1, oinfo,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2340) XFS_AG_RESV_AGFL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2341) if (error)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2342) return error;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2343)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2344) error = xfs_trans_get_buf(tp, tp->t_mountp->m_ddev_targp,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2345) XFS_AGB_TO_DADDR(tp->t_mountp, agno, agbno),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2346) tp->t_mountp->m_bsize, 0, &bp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2347) if (error)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2348) return error;
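	/*
	 * Invalidate the buffer so any stale contents of the freed AGFL
	 * block are never written back to disk.
	 */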
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2349) xfs_trans_binval(tp, bp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2350)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2351) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2352) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2353)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2354) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2355) * Check the agfl fields of the agf for inconsistency or corruption. The purpose
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2356) * is to detect an agfl header padding mismatch between current and early v5
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2357) * kernels. This problem manifests as a 1-slot size difference between the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2358) * on-disk flcount and the active [first, last] range of a wrapped agfl. This
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2359) * may also catch variants of agfl count corruption unrelated to padding. Either
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2360) * way, we'll reset the agfl and warn the user.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2361) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2362) * Return true if a reset is required before the agfl can be used, false
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2363) * otherwise.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2364) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2365) static bool
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2366) xfs_agfl_needs_reset(
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2367) struct xfs_mount *mp,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2368) struct xfs_agf *agf)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2369) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2370) uint32_t f = be32_to_cpu(agf->agf_flfirst);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2371) uint32_t l = be32_to_cpu(agf->agf_fllast);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2372) uint32_t c = be32_to_cpu(agf->agf_flcount);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2373) int agfl_size = xfs_agfl_size(mp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2374) int active;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2375)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2376) /* no agfl header on v4 supers */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2377) if (!xfs_sb_version_hascrc(&mp->m_sb))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2378) return false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2379)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2380) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2381) * The agf read verifier catches severe corruption of these fields.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2382) * Repeat some sanity checks to cover a packed -> unpacked mismatch if
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2383) * the verifier allows it.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2384) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2385) if (f >= agfl_size || l >= agfl_size)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2386) return true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2387) if (c > agfl_size)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2388) return true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2389)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2390) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2391) * Check consistency between the on-disk count and the active range. An
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2392) * agfl padding mismatch manifests as an inconsistent flcount.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2393) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2394) if (c && l >= f)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2395) active = l - f + 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2396) else if (c)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2397) active = agfl_size - f + l + 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2398) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2399) active = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2400)
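/*
 * Worked example: on a wrapped AGFL of, say, 119 slots with flfirst = 100
 * and fllast = 2, the active range covers 119 - 100 + 2 + 1 = 22 slots, so
 * an on-disk flcount of 21 or 23 (the padding mismatch case) fails the
 * check below.
 */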
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2401) return active != c;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2402) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2403)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2404) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2405) * Reset the agfl to an empty state. Ignore/drop any existing blocks since the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2406) * agfl content cannot be trusted. Warn the user that a repair is required to
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2407) * recover leaked blocks.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2408) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2409) * The purpose of this mechanism is to handle filesystems affected by the agfl
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2410) * header padding mismatch problem. A reset keeps the filesystem online with a
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2411) * relatively minor free space accounting inconsistency rather than suffer the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2412) * inevitable crash from use of an invalid agfl block.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2413) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2414) static void
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2415) xfs_agfl_reset(
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2416) struct xfs_trans *tp,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2417) struct xfs_buf *agbp,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2418) struct xfs_perag *pag)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2419) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2420) struct xfs_mount *mp = tp->t_mountp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2421) struct xfs_agf *agf = agbp->b_addr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2422)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2423) ASSERT(pag->pagf_agflreset);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2424) trace_xfs_agfl_reset(mp, agf, 0, _RET_IP_);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2425)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2426) xfs_warn(mp,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2427) "WARNING: Reset corrupted AGFL on AG %u. %d blocks leaked. "
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2428) "Please unmount and run xfs_repair.",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2429) pag->pag_agno, pag->pagf_flcount);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2430)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2431) agf->agf_flfirst = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2432) agf->agf_fllast = cpu_to_be32(xfs_agfl_size(mp) - 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2433) agf->agf_flcount = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2434) xfs_alloc_log_agf(tp, agbp, XFS_AGF_FLFIRST | XFS_AGF_FLLAST |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2435) XFS_AGF_FLCOUNT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2436)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2437) pag->pagf_flcount = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2438) pag->pagf_agflreset = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2439) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2440)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2441) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2442) * Defer an AGFL block free. This is effectively equivalent to
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2443) * xfs_bmap_add_free() with some special handling particular to AGFL blocks.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2444) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2445) * Deferring AGFL frees helps prevent log reservation overruns due to too many
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2446) * allocation operations in a transaction. AGFL frees are prone to this problem
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2447) * because they are always freed one block at a time. Further, an immediate
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2448) * AGFL block free can cause a btree join and require another block free before
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2449) * the real allocation can proceed. Deferring the free disconnects freeing up
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2450) * the AGFL slot from freeing the block.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2451) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2452) STATIC void
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2453) xfs_defer_agfl_block(
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2454) struct xfs_trans *tp,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2455) xfs_agnumber_t agno,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2456) xfs_fsblock_t agbno,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2457) struct xfs_owner_info *oinfo)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2458) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2459) struct xfs_mount *mp = tp->t_mountp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2460) struct xfs_extent_free_item *new; /* new element */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2461)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2462) ASSERT(xfs_bmap_free_item_zone != NULL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2463) ASSERT(oinfo != NULL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2464)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2465) new = kmem_cache_alloc(xfs_bmap_free_item_zone,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2466) GFP_KERNEL | __GFP_NOFAIL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2467) new->xefi_startblock = XFS_AGB_TO_FSB(mp, agno, agbno);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2468) new->xefi_blockcount = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2469) new->xefi_oinfo = *oinfo;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2470) new->xefi_skip_discard = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2471)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2472) trace_xfs_agfl_free_defer(mp, agno, 0, agbno, 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2473)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2474) xfs_defer_add(tp, XFS_DEFER_OPS_TYPE_AGFL_FREE, &new->xefi_list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2475) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2476)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2477) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2478) * Decide whether to use this allocation group for this allocation.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2479) * If so, fix up the btree freelist's size.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2480) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2481) int /* error */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2482) xfs_alloc_fix_freelist(
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2483) struct xfs_alloc_arg *args, /* allocation argument structure */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2484) int flags) /* XFS_ALLOC_FLAG_... */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2485) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2486) struct xfs_mount *mp = args->mp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2487) struct xfs_perag *pag = args->pag;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2488) struct xfs_trans *tp = args->tp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2489) struct xfs_buf *agbp = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2490) struct xfs_buf *agflbp = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2491) struct xfs_alloc_arg targs; /* local allocation arguments */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2492) xfs_agblock_t bno; /* freelist block */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2493) xfs_extlen_t need; /* total blocks needed in freelist */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2494) int error = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2495)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2496) /* deferred ops (AGFL block frees) require permanent transactions */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2497) ASSERT(tp->t_flags & XFS_TRANS_PERM_LOG_RES);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2498)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2499) if (!pag->pagf_init) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2500) error = xfs_alloc_read_agf(mp, tp, args->agno, flags, &agbp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2501) if (error) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2502) /* Couldn't lock the AGF so skip this AG. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2503) if (error == -EAGAIN)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2504) error = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2505) goto out_no_agbp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2506) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2507) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2508)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2509) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2510) * If this is a metadata-preferred pag and we are allocating user data,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2511) * try somewhere else, unless we are being asked to try harder at this
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2512) * point.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2513) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2514) if (pag->pagf_metadata && (args->datatype & XFS_ALLOC_USERDATA) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2515) (flags & XFS_ALLOC_FLAG_TRYLOCK)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2516) ASSERT(!(flags & XFS_ALLOC_FLAG_FREEING));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2517) goto out_agbp_relse;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2518) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2519)
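/*
 * Do a preliminary space check against the perag counters first; the
 * check is repeated further down, once the AGF is locked and any
 * pending AGFL reset has been applied, to make the final decision.
 */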
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2520) need = xfs_alloc_min_freelist(mp, pag);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2521) if (!xfs_alloc_space_available(args, need, flags |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2522) XFS_ALLOC_FLAG_CHECK))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2523) goto out_agbp_relse;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2524)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2525) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2526) * Get the a.g. freespace buffer. This can fail if we're not blocking
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2527) * on locks and the AGF is already locked by someone else.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2528) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2529) if (!agbp) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2530) error = xfs_alloc_read_agf(mp, tp, args->agno, flags, &agbp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2531) if (error) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2532) /* Couldn't lock the AGF so skip this AG. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2533) if (error == -EAGAIN)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2534) error = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2535) goto out_no_agbp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2536) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2537) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2538)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2539) /* reset a padding mismatched agfl before final free space check */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2540) if (pag->pagf_agflreset)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2541) xfs_agfl_reset(tp, agbp, pag);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2542)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2543) /* If there isn't enough total space or a large enough single extent, reject this AG. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2544) need = xfs_alloc_min_freelist(mp, pag);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2545) if (!xfs_alloc_space_available(args, need, flags))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2546) goto out_agbp_relse;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2547)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2548) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2549) * Make the freelist shorter if it's too long.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2550) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2551) * Note that from this point onwards, we will always release the agf and
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2552) * agfl buffers on error. This handles the case where we error out and
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2553) * the buffers are clean or may not have been joined to the transaction
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2554) * and hence need to be released manually. If they have been joined to
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2555) * the transaction, then xfs_trans_brelse() will handle them
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2556) * appropriately based on the recursion count and dirty state of the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2557) * buffer.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2558) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2559) * XXX (dgc): When we have lots of free space, does this buy us
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2560) * anything other than extra overhead when we need to put more blocks
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2561) * back on the free list? Maybe we should only do this when space is
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2562) * getting low or the AGFL is more than half full?
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2563) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2564) * The NOSHRINK flag prevents the AGFL from being shrunk if it's too
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2565) * big; the NORMAP flag prevents AGFL expand/shrink operations from
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2566) * updating the rmapbt. Both flags are used in xfs_repair while we're
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2567) * rebuilding the rmapbt, and neither are used by the kernel. They're
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2568) * both required to ensure that rmaps are correctly recorded for the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2569) * regenerated AGFL, bnobt, and cntbt. See repair/phase5.c and
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2570) * repair/rmap.c in xfsprogs for details.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2571) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2572) memset(&targs, 0, sizeof(targs));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2573) /* struct copy below */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2574) if (flags & XFS_ALLOC_FLAG_NORMAP)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2575) targs.oinfo = XFS_RMAP_OINFO_SKIP_UPDATE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2576) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2577) targs.oinfo = XFS_RMAP_OINFO_AG;
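/*
 * Example of the fixup below: if xfs_alloc_min_freelist() says we need 6
 * blocks and pagf_flcount is 10, the loop here (absent
 * XFS_ALLOC_FLAG_NOSHRINK) pulls 4 blocks off the AGFL and defers their
 * frees; if pagf_flcount were only 3, the loop further down would instead
 * allocate up to 3 blocks and put them back on the AGFL.
 */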
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2578) while (!(flags & XFS_ALLOC_FLAG_NOSHRINK) && pag->pagf_flcount > need) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2579) error = xfs_alloc_get_freelist(tp, agbp, &bno, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2580) if (error)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2581) goto out_agbp_relse;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2582)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2583) /* defer agfl frees */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2584) xfs_defer_agfl_block(tp, args->agno, bno, &targs.oinfo);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2585) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2586)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2587) targs.tp = tp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2588) targs.mp = mp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2589) targs.agbp = agbp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2590) targs.agno = args->agno;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2591) targs.alignment = targs.minlen = targs.prod = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2592) targs.type = XFS_ALLOCTYPE_THIS_AG;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2593) targs.pag = pag;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2594) error = xfs_alloc_read_agfl(mp, tp, targs.agno, &agflbp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2595) if (error)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2596) goto out_agbp_relse;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2597)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2598) /* Make the freelist longer if it's too short. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2599) while (pag->pagf_flcount < need) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2600) targs.agbno = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2601) targs.maxlen = need - pag->pagf_flcount;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2602) targs.resv = XFS_AG_RESV_AGFL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2603)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2604) /* Allocate as many blocks as possible at once. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2605) error = xfs_alloc_ag_vextent(&targs);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2606) if (error)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2607) goto out_agflbp_relse;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2608)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2609) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2610) * Stop if we run out. Won't happen if callers are obeying
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2611) * the restrictions correctly. Can happen for free calls
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2612) * on a completely full ag.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2613) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2614) if (targs.agbno == NULLAGBLOCK) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2615) if (flags & XFS_ALLOC_FLAG_FREEING)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2616) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2617) goto out_agflbp_relse;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2618) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2619) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2620) * Put each allocated block on the list.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2621) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2622) for (bno = targs.agbno; bno < targs.agbno + targs.len; bno++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2623) error = xfs_alloc_put_freelist(tp, agbp,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2624) agflbp, bno, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2625) if (error)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2626) goto out_agflbp_relse;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2627) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2628) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2629) xfs_trans_brelse(tp, agflbp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2630) args->agbp = agbp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2631) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2632)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2633) out_agflbp_relse:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2634) xfs_trans_brelse(tp, agflbp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2635) out_agbp_relse:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2636) if (agbp)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2637) xfs_trans_brelse(tp, agbp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2638) out_no_agbp:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2639) args->agbp = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2640) return error;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2641) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2642)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2643) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2644) * Get a block from the freelist.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2645) * Returns the block number in *bnop, or NULLAGBLOCK if the list is empty.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2646) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2647) int /* error */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2648) xfs_alloc_get_freelist(
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2649) xfs_trans_t *tp, /* transaction pointer */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2650) xfs_buf_t *agbp, /* buffer containing the agf structure */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2651) xfs_agblock_t *bnop, /* block address retrieved from freelist */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2652) int btreeblk) /* destination is an AGF btree */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2653) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2654) struct xfs_agf *agf = agbp->b_addr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2655) xfs_buf_t *agflbp;/* buffer for a.g. freelist structure */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2656) xfs_agblock_t bno; /* block number returned */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2657) __be32 *agfl_bno;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2658) int error;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2659) int logflags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2660) xfs_mount_t *mp = tp->t_mountp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2661) xfs_perag_t *pag; /* per allocation group data */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2662)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2663) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2664) * Freelist is empty, give up.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2665) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2666) if (!agf->agf_flcount) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2667) *bnop = NULLAGBLOCK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2668) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2669) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2670) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2671) * Read the array of free blocks.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2672) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2673) error = xfs_alloc_read_agfl(mp, tp, be32_to_cpu(agf->agf_seqno),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2674) &agflbp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2675) if (error)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2676) return error;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2677)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2679) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2680) * Get the block number and update the data structures.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2681) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2682) agfl_bno = xfs_buf_to_agfl_bno(agflbp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2683) bno = be32_to_cpu(agfl_bno[be32_to_cpu(agf->agf_flfirst)]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2684) be32_add_cpu(&agf->agf_flfirst, 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2685) xfs_trans_brelse(tp, agflbp);
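/*
 * The AGFL is treated as a circular buffer of xfs_agfl_size(mp) slots,
 * so agf_flfirst wraps back to slot 0 once it runs off the end.
 */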
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2686) if (be32_to_cpu(agf->agf_flfirst) == xfs_agfl_size(mp))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2687) agf->agf_flfirst = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2688)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2689) pag = agbp->b_pag;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2690) ASSERT(!pag->pagf_agflreset);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2691) be32_add_cpu(&agf->agf_flcount, -1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2692) xfs_trans_agflist_delta(tp, -1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2693) pag->pagf_flcount--;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2694)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2695) logflags = XFS_AGF_FLFIRST | XFS_AGF_FLCOUNT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2696) if (btreeblk) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2697) be32_add_cpu(&agf->agf_btreeblks, 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2698) pag->pagf_btreeblks++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2699) logflags |= XFS_AGF_BTREEBLKS;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2700) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2701)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2702) xfs_alloc_log_agf(tp, agbp, logflags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2703) *bnop = bno;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2704)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2705) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2706) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2707)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2708) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2709) * Log the given fields from the agf structure.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2710) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2711) void
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2712) xfs_alloc_log_agf(
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2713) xfs_trans_t *tp, /* transaction pointer */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2714) xfs_buf_t *bp, /* buffer for a.g. freelist header */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2715) int fields) /* mask of fields to be logged (XFS_AGF_...) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2716) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2717) int first; /* first byte offset */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2718) int last; /* last byte offset */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2719) static const short offsets[] = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2720) offsetof(xfs_agf_t, agf_magicnum),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2721) offsetof(xfs_agf_t, agf_versionnum),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2722) offsetof(xfs_agf_t, agf_seqno),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2723) offsetof(xfs_agf_t, agf_length),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2724) offsetof(xfs_agf_t, agf_roots[0]),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2725) offsetof(xfs_agf_t, agf_levels[0]),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2726) offsetof(xfs_agf_t, agf_flfirst),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2727) offsetof(xfs_agf_t, agf_fllast),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2728) offsetof(xfs_agf_t, agf_flcount),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2729) offsetof(xfs_agf_t, agf_freeblks),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2730) offsetof(xfs_agf_t, agf_longest),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2731) offsetof(xfs_agf_t, agf_btreeblks),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2732) offsetof(xfs_agf_t, agf_uuid),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2733) offsetof(xfs_agf_t, agf_rmap_blocks),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2734) offsetof(xfs_agf_t, agf_refcount_blocks),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2735) offsetof(xfs_agf_t, agf_refcount_root),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2736) offsetof(xfs_agf_t, agf_refcount_level),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2737) /* needed so that we don't log the whole rest of the structure: */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2738) offsetof(xfs_agf_t, agf_spare64),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2739) sizeof(xfs_agf_t)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2740) };
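/*
 * xfs_btree_offsets() below maps the field mask onto this table and
 * produces a single contiguous byte range to log. For example, logging
 * XFS_AGF_FLFIRST | XFS_AGF_FLCOUNT yields the range from
 * offsetof(agf_flfirst) up to offsetof(agf_freeblks) - 1, which also
 * takes in agf_fllast.
 */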
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2741)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2742) trace_xfs_agf(tp->t_mountp, bp->b_addr, fields, _RET_IP_);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2743)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2744) xfs_trans_buf_set_type(tp, bp, XFS_BLFT_AGF_BUF);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2745)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2746) xfs_btree_offsets(fields, offsets, XFS_AGF_NUM_BITS, &first, &last);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2747) xfs_trans_log_buf(tp, bp, (uint)first, (uint)last);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2748) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2749)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2750) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2751) * Interface for inode allocation to force the pag data to be initialized.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2752) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2753) int /* error */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2754) xfs_alloc_pagf_init(
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2755) xfs_mount_t *mp, /* file system mount structure */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2756) xfs_trans_t *tp, /* transaction pointer */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2757) xfs_agnumber_t agno, /* allocation group number */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2758) int flags) /* XFS_ALLOC_FLAGS_... */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2759) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2760) xfs_buf_t *bp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2761) int error;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2762)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2763) error = xfs_alloc_read_agf(mp, tp, agno, flags, &bp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2764) if (!error)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2765) xfs_trans_brelse(tp, bp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2766) return error;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2767) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2768)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2769) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2770) * Put the block on the freelist for the allocation group.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2771) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2772) int /* error */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2773) xfs_alloc_put_freelist(
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2774) xfs_trans_t *tp, /* transaction pointer */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2775) xfs_buf_t *agbp, /* buffer for a.g. freelist header */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2776) xfs_buf_t *agflbp,/* buffer for a.g. free block array */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2777) xfs_agblock_t bno, /* block being freed */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2778) int btreeblk) /* block came from an AGF btree */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2779) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2780) struct xfs_mount *mp = tp->t_mountp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2781) struct xfs_agf *agf = agbp->b_addr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2782) __be32 *blockp;/* pointer to array entry */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2783) int error;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2784) int logflags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2785) xfs_perag_t *pag; /* per allocation group data */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2786) __be32 *agfl_bno;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2787) int startoff;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2788)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2789) if (!agflbp && (error = xfs_alloc_read_agfl(mp, tp,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2790) be32_to_cpu(agf->agf_seqno), &agflbp)))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2791) return error;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2792) be32_add_cpu(&agf->agf_fllast, 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2793) if (be32_to_cpu(agf->agf_fllast) == xfs_agfl_size(mp))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2794) agf->agf_fllast = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2795)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2796) pag = agbp->b_pag;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2797) ASSERT(!pag->pagf_agflreset);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2798) be32_add_cpu(&agf->agf_flcount, 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2799) xfs_trans_agflist_delta(tp, 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2800) pag->pagf_flcount++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2801)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2802) logflags = XFS_AGF_FLLAST | XFS_AGF_FLCOUNT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2803) if (btreeblk) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2804) be32_add_cpu(&agf->agf_btreeblks, -1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2805) pag->pagf_btreeblks--;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2806) logflags |= XFS_AGF_BTREEBLKS;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2807) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2808)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2809) xfs_alloc_log_agf(tp, agbp, logflags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2810)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2811) ASSERT(be32_to_cpu(agf->agf_flcount) <= xfs_agfl_size(mp));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2812)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2813) agfl_bno = xfs_buf_to_agfl_bno(agflbp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2814) blockp = &agfl_bno[be32_to_cpu(agf->agf_fllast)];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2815) *blockp = cpu_to_be32(bno);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2816) startoff = (char *)blockp - (char *)agflbp->b_addr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2817)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2818) xfs_alloc_log_agf(tp, agbp, logflags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2819)
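/*
 * startoff is the byte offset of the slot just written within the AGFL
 * buffer, so only that single sizeof(xfs_agblock_t) entry needs to be
 * logged here.
 */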
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2820) xfs_trans_buf_set_type(tp, agflbp, XFS_BLFT_AGFL_BUF);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2821) xfs_trans_log_buf(tp, agflbp, startoff,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2822) startoff + sizeof(xfs_agblock_t) - 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2823) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2824) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2825)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2826) static xfs_failaddr_t
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2827) xfs_agf_verify(
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2828) struct xfs_buf *bp)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2829) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2830) struct xfs_mount *mp = bp->b_mount;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2831) struct xfs_agf *agf = bp->b_addr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2832)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2833) if (xfs_sb_version_hascrc(&mp->m_sb)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2834) if (!uuid_equal(&agf->agf_uuid, &mp->m_sb.sb_meta_uuid))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2835) return __this_address;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2836) if (!xfs_log_check_lsn(mp, be64_to_cpu(agf->agf_lsn)))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2837) return __this_address;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2838) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2839)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2840) if (!xfs_verify_magic(bp, agf->agf_magicnum))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2841) return __this_address;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2842)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2843) if (!(XFS_AGF_GOOD_VERSION(be32_to_cpu(agf->agf_versionnum)) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2844) be32_to_cpu(agf->agf_freeblks) <= be32_to_cpu(agf->agf_length) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2845) be32_to_cpu(agf->agf_flfirst) < xfs_agfl_size(mp) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2846) be32_to_cpu(agf->agf_fllast) < xfs_agfl_size(mp) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2847) be32_to_cpu(agf->agf_flcount) <= xfs_agfl_size(mp)))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2848) return __this_address;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2849)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2850) if (be32_to_cpu(agf->agf_length) > mp->m_sb.sb_dblocks)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2851) return __this_address;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2852)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2853) if (be32_to_cpu(agf->agf_freeblks) < be32_to_cpu(agf->agf_longest) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2854) be32_to_cpu(agf->agf_freeblks) > be32_to_cpu(agf->agf_length))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2855) return __this_address;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2856)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2857) if (be32_to_cpu(agf->agf_levels[XFS_BTNUM_BNO]) < 1 ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2858) be32_to_cpu(agf->agf_levels[XFS_BTNUM_CNT]) < 1 ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2859) be32_to_cpu(agf->agf_levels[XFS_BTNUM_BNO]) > XFS_BTREE_MAXLEVELS ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2860) be32_to_cpu(agf->agf_levels[XFS_BTNUM_CNT]) > XFS_BTREE_MAXLEVELS)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2861) return __this_address;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2862)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2863) if (xfs_sb_version_hasrmapbt(&mp->m_sb) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2864) (be32_to_cpu(agf->agf_levels[XFS_BTNUM_RMAP]) < 1 ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2865) be32_to_cpu(agf->agf_levels[XFS_BTNUM_RMAP]) > XFS_BTREE_MAXLEVELS))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2866) return __this_address;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2867)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2868) if (xfs_sb_version_hasrmapbt(&mp->m_sb) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2869) be32_to_cpu(agf->agf_rmap_blocks) > be32_to_cpu(agf->agf_length))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2870) return __this_address;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2871)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2872) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2873) * During growfs operations the perag is not fully initialised, so it
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2874) * can't be used for any useful checking. growfs uses uncached buffers
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2875) * that have no perag attached, which lets us detect this case here and
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2876) * skip the check.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2877) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2878) if (bp->b_pag && be32_to_cpu(agf->agf_seqno) != bp->b_pag->pag_agno)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2879) return __this_address;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2880)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2881) if (xfs_sb_version_haslazysbcount(&mp->m_sb) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2882) be32_to_cpu(agf->agf_btreeblks) > be32_to_cpu(agf->agf_length))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2883) return __this_address;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2884)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2885) if (xfs_sb_version_hasreflink(&mp->m_sb) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2886) be32_to_cpu(agf->agf_refcount_blocks) >
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2887) be32_to_cpu(agf->agf_length))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2888) return __this_address;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2889)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2890) if (xfs_sb_version_hasreflink(&mp->m_sb) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2891) (be32_to_cpu(agf->agf_refcount_level) < 1 ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2892) be32_to_cpu(agf->agf_refcount_level) > XFS_BTREE_MAXLEVELS))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2893) return __this_address;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2894)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2895) return NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2897) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2898)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2899) static void
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2900) xfs_agf_read_verify(
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2901) struct xfs_buf *bp)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2902) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2903) struct xfs_mount *mp = bp->b_mount;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2904) xfs_failaddr_t fa;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2905)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2906) if (xfs_sb_version_hascrc(&mp->m_sb) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2907) !xfs_buf_verify_cksum(bp, XFS_AGF_CRC_OFF))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2908) xfs_verifier_error(bp, -EFSBADCRC, __this_address);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2909) else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2910) fa = xfs_agf_verify(bp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2911) if (XFS_TEST_ERROR(fa, mp, XFS_ERRTAG_ALLOC_READ_AGF))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2912) xfs_verifier_error(bp, -EFSCORRUPTED, fa);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2913) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2914) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2915)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2916) static void
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2917) xfs_agf_write_verify(
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2918) struct xfs_buf *bp)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2919) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2920) struct xfs_mount *mp = bp->b_mount;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2921) struct xfs_buf_log_item *bip = bp->b_log_item;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2922) struct xfs_agf *agf = bp->b_addr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2923) xfs_failaddr_t fa;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2924)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2925) fa = xfs_agf_verify(bp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2926) if (fa) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2927) xfs_verifier_error(bp, -EFSCORRUPTED, fa);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2928) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2929) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2930)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2931) if (!xfs_sb_version_hascrc(&mp->m_sb))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2932) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2933)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2934) if (bip)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2935) agf->agf_lsn = cpu_to_be64(bip->bli_item.li_lsn);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2936)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2937) xfs_buf_update_cksum(bp, XFS_AGF_CRC_OFF);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2938) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2939)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2940) const struct xfs_buf_ops xfs_agf_buf_ops = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2941) .name = "xfs_agf",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2942) .magic = { cpu_to_be32(XFS_AGF_MAGIC), cpu_to_be32(XFS_AGF_MAGIC) },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2943) .verify_read = xfs_agf_read_verify,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2944) .verify_write = xfs_agf_write_verify,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2945) .verify_struct = xfs_agf_verify,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2946) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2947)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2948) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2949) * Read in the allocation group header (free/alloc section).
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2950) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2951) int /* error */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2952) xfs_read_agf(
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2953) struct xfs_mount *mp, /* mount point structure */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2954) struct xfs_trans *tp, /* transaction pointer */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2955) xfs_agnumber_t agno, /* allocation group number */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2956) int flags, /* XFS_BUF_ */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2957) struct xfs_buf **bpp) /* buffer for the ag freelist header */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2958) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2959) int error;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2960)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2961) trace_xfs_read_agf(mp, agno);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2962)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2963) ASSERT(agno != NULLAGNUMBER);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2964) error = xfs_trans_read_buf(mp, tp, mp->m_ddev_targp,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2965) XFS_AG_DADDR(mp, agno, XFS_AGF_DADDR(mp)),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2966) XFS_FSS_TO_BB(mp, 1), flags, bpp, &xfs_agf_buf_ops);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2967) if (error)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2968) return error;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2969)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2970) ASSERT(!(*bpp)->b_error);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2971) xfs_buf_set_ref(*bpp, XFS_AGF_REF);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2972) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2973) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2974)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2975) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2976) * Read in the allocation group header (free/alloc section).
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2977) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2978) int /* error */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2979) xfs_alloc_read_agf(
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2980) struct xfs_mount *mp, /* mount point structure */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2981) struct xfs_trans *tp, /* transaction pointer */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2982) xfs_agnumber_t agno, /* allocation group number */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2983) int flags, /* XFS_ALLOC_FLAG_... */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2984) struct xfs_buf **bpp) /* buffer for the ag freelist header */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2985) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2986) struct xfs_agf *agf; /* ag freelist header */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2987) struct xfs_perag *pag; /* per allocation group data */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2988) int error;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2989)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2990) trace_xfs_alloc_read_agf(mp, agno);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2991)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2992) /* We don't support trylock when freeing. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2993) ASSERT((flags & (XFS_ALLOC_FLAG_FREEING | XFS_ALLOC_FLAG_TRYLOCK)) !=
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2994) (XFS_ALLOC_FLAG_FREEING | XFS_ALLOC_FLAG_TRYLOCK));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2995) ASSERT(agno != NULLAGNUMBER);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2996) error = xfs_read_agf(mp, tp, agno,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2997) (flags & XFS_ALLOC_FLAG_TRYLOCK) ? XBF_TRYLOCK : 0,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2998) bpp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2999) if (error)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3000) return error;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3001) ASSERT(!(*bpp)->b_error);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3002)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3003) agf = (*bpp)->b_addr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3004) pag = (*bpp)->b_pag;
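/*
 * The first successful read of this AGF seeds the in-memory perag
 * counters below; later reads leave them alone (the DEBUG block further
 * down only cross-checks them).
 */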
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3005) if (!pag->pagf_init) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3006) pag->pagf_freeblks = be32_to_cpu(agf->agf_freeblks);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3007) pag->pagf_btreeblks = be32_to_cpu(agf->agf_btreeblks);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3008) pag->pagf_flcount = be32_to_cpu(agf->agf_flcount);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3009) pag->pagf_longest = be32_to_cpu(agf->agf_longest);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3010) pag->pagf_levels[XFS_BTNUM_BNOi] =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3011) be32_to_cpu(agf->agf_levels[XFS_BTNUM_BNOi]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3012) pag->pagf_levels[XFS_BTNUM_CNTi] =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3013) be32_to_cpu(agf->agf_levels[XFS_BTNUM_CNTi]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3014) pag->pagf_levels[XFS_BTNUM_RMAPi] =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3015) be32_to_cpu(agf->agf_levels[XFS_BTNUM_RMAPi]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3016) pag->pagf_refcount_level = be32_to_cpu(agf->agf_refcount_level);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3017) pag->pagf_init = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3018) pag->pagf_agflreset = xfs_agfl_needs_reset(mp, agf);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3019) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3020) #ifdef DEBUG
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3021) else if (!XFS_FORCED_SHUTDOWN(mp)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3022) ASSERT(pag->pagf_freeblks == be32_to_cpu(agf->agf_freeblks));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3023) ASSERT(pag->pagf_btreeblks == be32_to_cpu(agf->agf_btreeblks));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3024) ASSERT(pag->pagf_flcount == be32_to_cpu(agf->agf_flcount));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3025) ASSERT(pag->pagf_longest == be32_to_cpu(agf->agf_longest));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3026) ASSERT(pag->pagf_levels[XFS_BTNUM_BNOi] ==
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3027) be32_to_cpu(agf->agf_levels[XFS_BTNUM_BNOi]));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3028) ASSERT(pag->pagf_levels[XFS_BTNUM_CNTi] ==
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3029) be32_to_cpu(agf->agf_levels[XFS_BTNUM_CNTi]));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3030) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3031) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3032) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3033) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3034)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3035) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3036) * Allocate an extent (variable-size).
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3037) * Depending on the allocation type, we either look in a single allocation
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3038) * group or loop over the allocation groups to find the result.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3039) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3040) int /* error */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3041) xfs_alloc_vextent(
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3042) struct xfs_alloc_arg *args) /* allocation argument structure */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3043) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3044) xfs_agblock_t agsize; /* allocation group size */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3045) int error;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3046) int flags; /* XFS_ALLOC_FLAG_... locking flags */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3047) struct xfs_mount *mp; /* mount structure pointer */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3048) xfs_agnumber_t sagno; /* starting allocation group number */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3049) xfs_alloctype_t type; /* input allocation type */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3050) int bump_rotor = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3051) xfs_agnumber_t rotorstep = xfs_rotorstep; /* inode32 agf stepper */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3052)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3053) mp = args->mp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3054) type = args->otype = args->type;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3055) args->agbno = NULLAGBLOCK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3056) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3057) * Just clamp maxlen to the a.g. size here, for the case where the last
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3058) * a.g. is shorter (or there's only one a.g.) and the caller couldn't
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3059) * easily figure that out (xfs_bmap_alloc).
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3060) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3061) agsize = mp->m_sb.sb_agblocks;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3062) if (args->maxlen > agsize)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3063) args->maxlen = agsize;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3064) if (args->alignment == 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3065) args->alignment = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3066) ASSERT(XFS_FSB_TO_AGNO(mp, args->fsbno) < mp->m_sb.sb_agcount);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3067) ASSERT(XFS_FSB_TO_AGBNO(mp, args->fsbno) < agsize);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3068) ASSERT(args->minlen <= args->maxlen);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3069) ASSERT(args->minlen <= agsize);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3070) ASSERT(args->mod < args->prod);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3071) if (XFS_FSB_TO_AGNO(mp, args->fsbno) >= mp->m_sb.sb_agcount ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3072) XFS_FSB_TO_AGBNO(mp, args->fsbno) >= agsize ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3073) args->minlen > args->maxlen || args->minlen > agsize ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3074) args->mod >= args->prod) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3075) args->fsbno = NULLFSBLOCK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3076) trace_xfs_alloc_vextent_badargs(args);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3077) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3078) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3079)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3080) switch (type) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3081) case XFS_ALLOCTYPE_THIS_AG:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3082) case XFS_ALLOCTYPE_NEAR_BNO:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3083) case XFS_ALLOCTYPE_THIS_BNO:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3084) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3085) * These three force us into a single a.g.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3086) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3087) args->agno = XFS_FSB_TO_AGNO(mp, args->fsbno);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3088) args->pag = xfs_perag_get(mp, args->agno);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3089) error = xfs_alloc_fix_freelist(args, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3090) if (error) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3091) trace_xfs_alloc_vextent_nofix(args);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3092) goto error0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3093) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3094) if (!args->agbp) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3095) trace_xfs_alloc_vextent_noagbp(args);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3096) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3097) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3098) args->agbno = XFS_FSB_TO_AGBNO(mp, args->fsbno);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3099) if ((error = xfs_alloc_ag_vextent(args)))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3100) goto error0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3101) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3102) case XFS_ALLOCTYPE_START_BNO:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3103) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3104) * Try near allocation first, then anywhere-in-ag after
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3105) * the first a.g. fails.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3106) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3107) if ((args->datatype & XFS_ALLOC_INITIAL_USER_DATA) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3108) (mp->m_flags & XFS_MOUNT_32BITINODES)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3109) args->fsbno = XFS_AGB_TO_FSB(mp,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3110) ((mp->m_agfrotor / rotorstep) %
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3111) mp->m_sb.sb_agcount), 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3112) bump_rotor = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3113) }
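/*
 * With the default xfs_rotorstep of 1 this just cycles the starting AG
 * through 0, 1, 2, ... for successive initial user-data allocations on
 * inode32 mounts; a larger rotorstep keeps several consecutive
 * allocations in the same AG before moving on.
 */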
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3114) args->agbno = XFS_FSB_TO_AGBNO(mp, args->fsbno);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3115) args->type = XFS_ALLOCTYPE_NEAR_BNO;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3116) /* FALLTHROUGH */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3117) case XFS_ALLOCTYPE_FIRST_AG:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3118) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3119) * Rotate through the allocation groups looking for a winner.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3120) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3121) if (type == XFS_ALLOCTYPE_FIRST_AG) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3122) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3123) * Start with allocation group given by bno.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3124) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3125) args->agno = XFS_FSB_TO_AGNO(mp, args->fsbno);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3126) args->type = XFS_ALLOCTYPE_THIS_AG;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3127) sagno = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3128) flags = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3129) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3130) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3131) * Start with the given allocation group.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3132) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3133) args->agno = sagno = XFS_FSB_TO_AGNO(mp, args->fsbno);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3134) flags = XFS_ALLOC_FLAG_TRYLOCK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3135) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3136) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3137) * Loop over allocation groups twice; first time with
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3138) * trylock set, second time without.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3139) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3140) for (;;) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3141) args->pag = xfs_perag_get(mp, args->agno);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3142) error = xfs_alloc_fix_freelist(args, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3143) if (error) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3144) trace_xfs_alloc_vextent_nofix(args);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3145) goto error0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3146) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3147) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3148) * If we get an AGF buffer back then the allocation will succeed.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3149) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3150) if (args->agbp) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3151) if ((error = xfs_alloc_ag_vextent(args)))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3152) goto error0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3153) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3154) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3155)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3156) trace_xfs_alloc_vextent_loopfailed(args);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3157)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3158) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3159) * That didn't work; figure out the next AG to try.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3160) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3161) if (args->agno == sagno &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3162) type == XFS_ALLOCTYPE_START_BNO)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3163) args->type = XFS_ALLOCTYPE_THIS_AG;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3164) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3165) * For the first allocation, we can try any AG to get
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3166) * space. However, if we have already allocated a
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3167) * block, we don't want to try AGs whose number is below
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3168) * sagno. Otherwise, we may end up with out-of-order
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3169) * locking of AGF, which might cause deadlock.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3170) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3171) if (++(args->agno) == mp->m_sb.sb_agcount) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3172) if (args->tp->t_firstblock != NULLFSBLOCK)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3173) args->agno = sagno;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3174) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3175) args->agno = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3176) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3177) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3178) * Reached the starting AG; we must either be done
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3179) * or switch to non-trylock mode.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3180) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3181) if (args->agno == sagno) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3182) if (flags == 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3183) args->agbno = NULLAGBLOCK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3184) trace_xfs_alloc_vextent_allfailed(args);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3185) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3186) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3187)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3188) flags = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3189) if (type == XFS_ALLOCTYPE_START_BNO) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3190) args->agbno = XFS_FSB_TO_AGBNO(mp,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3191) args->fsbno);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3192) args->type = XFS_ALLOCTYPE_NEAR_BNO;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3193) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3194) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3195) xfs_perag_put(args->pag);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3196) }
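/*
 * Advance the AG rotor so that the next initial user data
 * allocation starts in a different AG, spreading file data
 * across the filesystem.
 */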
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3197) if (bump_rotor) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3198) if (args->agno == sagno)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3199) mp->m_agfrotor = (mp->m_agfrotor + 1) %
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3200) (mp->m_sb.sb_agcount * rotorstep);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3201) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3202) mp->m_agfrotor = (args->agno * rotorstep + 1) %
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3203) (mp->m_sb.sb_agcount * rotorstep);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3204) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3205) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3206) default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3207) ASSERT(0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3208) /* NOTREACHED */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3209) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3210) if (args->agbno == NULLAGBLOCK)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3211) args->fsbno = NULLFSBLOCK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3212) else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3213) args->fsbno = XFS_AGB_TO_FSB(mp, args->agno, args->agbno);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3214) #ifdef DEBUG
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3215) ASSERT(args->len >= args->minlen);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3216) ASSERT(args->len <= args->maxlen);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3217) ASSERT(args->agbno % args->alignment == 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3218) XFS_AG_CHECK_DADDR(mp, XFS_FSB_TO_DADDR(mp, args->fsbno),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3219) args->len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3220) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3221)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3222) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3223) xfs_perag_put(args->pag);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3224) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3225) error0:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3226) xfs_perag_put(args->pag);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3227) return error;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3228) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3229)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3230) /* Ensure that the freelist is at full capacity. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3231) int
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3232) xfs_free_extent_fix_freelist(
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3233) struct xfs_trans *tp,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3234) xfs_agnumber_t agno,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3235) struct xfs_buf **agbp)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3236) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3237) struct xfs_alloc_arg args;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3238) int error;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3239)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3240) memset(&args, 0, sizeof(struct xfs_alloc_arg));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3241) args.tp = tp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3242) args.mp = tp->t_mountp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3243) args.agno = agno;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3244)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3245) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3246) * Validate that the AG number is legal - this enables us to detect
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3247) * and handle silent filesystem corruption rather than crashing.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3248) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3249) if (args.agno >= args.mp->m_sb.sb_agcount)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3250) return -EFSCORRUPTED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3251)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3252) args.pag = xfs_perag_get(args.mp, args.agno);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3253) ASSERT(args.pag);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3254)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3255) error = xfs_alloc_fix_freelist(&args, XFS_ALLOC_FLAG_FREEING);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3256) if (error)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3257) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3258)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3259) *agbp = args.agbp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3260) out:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3261) xfs_perag_put(args.pag);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3262) return error;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3263) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3264)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3265) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3266) * Free an extent.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3267) * Just break up the extent address and hand off to xfs_free_ag_extent
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3268) * after fixing up the freelist.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3269) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3270) int
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3271) __xfs_free_extent(
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3272) struct xfs_trans *tp,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3273) xfs_fsblock_t bno,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3274) xfs_extlen_t len,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3275) const struct xfs_owner_info *oinfo,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3276) enum xfs_ag_resv_type type,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3277) bool skip_discard)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3278) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3279) struct xfs_mount *mp = tp->t_mountp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3280) struct xfs_buf *agbp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3281) xfs_agnumber_t agno = XFS_FSB_TO_AGNO(mp, bno);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3282) xfs_agblock_t agbno = XFS_FSB_TO_AGBNO(mp, bno);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3283) struct xfs_agf *agf;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3284) int error;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3285) unsigned int busy_flags = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3286)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3287) ASSERT(len != 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3288) ASSERT(type != XFS_AG_RESV_AGFL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3289)
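/* error injection point: optionally fail the free for testing */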
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3290) if (XFS_TEST_ERROR(false, mp,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3291) XFS_ERRTAG_FREE_EXTENT))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3292) return -EIO;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3293)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3294) error = xfs_free_extent_fix_freelist(tp, agno, &agbp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3295) if (error)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3296) return error;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3297) agf = agbp->b_addr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3298)
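/* validate the start block is within the size of an AG */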
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3299) if (XFS_IS_CORRUPT(mp, agbno >= mp->m_sb.sb_agblocks)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3300) error = -EFSCORRUPTED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3301) goto err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3302) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3303)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3304) /* validate the extent size is legal now that we have the AGF locked */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3305) if (XFS_IS_CORRUPT(mp, agbno + len > be32_to_cpu(agf->agf_length))) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3306) error = -EFSCORRUPTED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3307) goto err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3308) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3309)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3310) error = xfs_free_ag_extent(tp, agbp, agno, agbno, len, oinfo, type);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3311) if (error)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3312) goto err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3313)
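/*
 * Mark the freed extent busy so it is not reused until the
 * transaction that freed it has committed to the log.
 */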
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3314) if (skip_discard)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3315) busy_flags |= XFS_EXTENT_BUSY_SKIP_DISCARD;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3316) xfs_extent_busy_insert(tp, agno, agbno, len, busy_flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3317) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3318)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3319) err:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3320) xfs_trans_brelse(tp, agbp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3321) return error;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3322) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3323)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3324) struct xfs_alloc_query_range_info {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3325) xfs_alloc_query_range_fn fn;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3326) void *priv;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3327) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3328)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3329) /* Format btree record and pass to our callback. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3330) STATIC int
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3331) xfs_alloc_query_range_helper(
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3332) struct xfs_btree_cur *cur,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3333) union xfs_btree_rec *rec,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3334) void *priv)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3335) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3336) struct xfs_alloc_query_range_info *query = priv;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3337) struct xfs_alloc_rec_incore irec;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3338)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3339) irec.ar_startblock = be32_to_cpu(rec->alloc.ar_startblock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3340) irec.ar_blockcount = be32_to_cpu(rec->alloc.ar_blockcount);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3341) return query->fn(cur, &irec, query->priv);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3342) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3343)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3344) /* Find all free space within a given range of blocks. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3345) int
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3346) xfs_alloc_query_range(
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3347) struct xfs_btree_cur *cur,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3348) struct xfs_alloc_rec_incore *low_rec,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3349) struct xfs_alloc_rec_incore *high_rec,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3350) xfs_alloc_query_range_fn fn,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3351) void *priv)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3352) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3353) union xfs_btree_irec low_brec;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3354) union xfs_btree_irec high_brec;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3355) struct xfs_alloc_query_range_info query;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3356)
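/* range queries are only supported on the by-block (bnobt) cursor */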
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3357) ASSERT(cur->bc_btnum == XFS_BTNUM_BNO);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3358) low_brec.a = *low_rec;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3359) high_brec.a = *high_rec;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3360) query.priv = priv;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3361) query.fn = fn;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3362) return xfs_btree_query_range(cur, &low_brec, &high_brec,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3363) xfs_alloc_query_range_helper, &query);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3364) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3365)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3366) /* Find all free space records. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3367) int
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3368) xfs_alloc_query_all(
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3369) struct xfs_btree_cur *cur,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3370) xfs_alloc_query_range_fn fn,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3371) void *priv)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3372) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3373) struct xfs_alloc_query_range_info query;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3374)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3375) ASSERT(cur->bc_btnum == XFS_BTNUM_BNO);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3376) query.priv = priv;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3377) query.fn = fn;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3378) return xfs_btree_query_all(cur, xfs_alloc_query_range_helper, &query);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3379) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3380)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3381) /* Is there a record covering a given extent? */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3382) int
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3383) xfs_alloc_has_record(
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3384) struct xfs_btree_cur *cur,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3385) xfs_agblock_t bno,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3386) xfs_extlen_t len,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3387) bool *exists)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3388) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3389) union xfs_btree_irec low;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3390) union xfs_btree_irec high;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3391)
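/* set up a range query covering [bno, bno + len - 1] */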
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3392) memset(&low, 0, sizeof(low));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3393) low.a.ar_startblock = bno;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3394) memset(&high, 0xFF, sizeof(high));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3395) high.a.ar_startblock = bno + len - 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3396)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3397) return xfs_btree_has_record(cur, &low, &high, exists);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3398) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3399)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3400) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3401) * Walk all the blocks in the AGFL. The @walk_fn can return any negative
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3402) * error code or XFS_ITER_*.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3403) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3404) int
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3405) xfs_agfl_walk(
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3406) struct xfs_mount *mp,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3407) struct xfs_agf *agf,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3408) struct xfs_buf *agflbp,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3409) xfs_agfl_walk_fn walk_fn,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3410) void *priv)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3411) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3412) __be32 *agfl_bno;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3413) unsigned int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3414) int error;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3415)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3416) agfl_bno = xfs_buf_to_agfl_bno(agflbp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3417) i = be32_to_cpu(agf->agf_flfirst);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3418)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3419) /* Nothing to walk in an empty AGFL. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3420) if (agf->agf_flcount == cpu_to_be32(0))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3421) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3422)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3423) /* Otherwise, walk from first to last, wrapping as needed. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3424) for (;;) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3425) error = walk_fn(mp, be32_to_cpu(agfl_bno[i]), priv);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3426) if (error)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3427) return error;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3428) if (i == be32_to_cpu(agf->agf_fllast))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3429) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3430) if (++i == xfs_agfl_size(mp))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3431) i = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3432) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3433)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3434) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3435) }