^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1) // SPDX-License-Identifier: GPL-2.0
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3) * Copyright (c) 2000-2002,2005 Silicon Graphics, Inc.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4) * All Rights Reserved.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6) #include "xfs.h"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7) #include "xfs_fs.h"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8) #include "xfs_shared.h"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9) #include "xfs_format.h"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10) #include "xfs_log_format.h"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11) #include "xfs_trans_resv.h"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12) #include "xfs_bit.h"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13) #include "xfs_sb.h"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14) #include "xfs_mount.h"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15) #include "xfs_inode.h"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 16) #include "xfs_btree.h"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 17) #include "xfs_ialloc.h"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 18) #include "xfs_ialloc_btree.h"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 19) #include "xfs_alloc.h"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 20) #include "xfs_errortag.h"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 21) #include "xfs_error.h"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 22) #include "xfs_bmap.h"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 23) #include "xfs_trans.h"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 24) #include "xfs_buf_item.h"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 25) #include "xfs_icreate_item.h"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 26) #include "xfs_icache.h"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 27) #include "xfs_trace.h"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 28) #include "xfs_log.h"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 29) #include "xfs_rmap.h"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 30)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 31) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 32) * Lookup a record by ino in the btree given by cur.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 33) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 34) int /* error */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 35) xfs_inobt_lookup(
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 36) struct xfs_btree_cur *cur, /* btree cursor */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 37) xfs_agino_t ino, /* starting inode of chunk */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 38) xfs_lookup_t dir, /* <=, >=, == */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 39) int *stat) /* success/failure */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 40) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 41) cur->bc_rec.i.ir_startino = ino;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 42) cur->bc_rec.i.ir_holemask = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 43) cur->bc_rec.i.ir_count = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 44) cur->bc_rec.i.ir_freecount = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 45) cur->bc_rec.i.ir_free = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 46) return xfs_btree_lookup(cur, dir, stat);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 47) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 48)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 49) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 50) * Update the record referred to by cur to the value given.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 51) * This either works (return 0) or gets an EFSCORRUPTED error.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 52) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 53) STATIC int /* error */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 54) xfs_inobt_update(
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 55) struct xfs_btree_cur *cur, /* btree cursor */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 56) xfs_inobt_rec_incore_t *irec) /* btree record */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 57) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 58) union xfs_btree_rec rec;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 59)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 60) rec.inobt.ir_startino = cpu_to_be32(irec->ir_startino);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 61) if (xfs_sb_version_hassparseinodes(&cur->bc_mp->m_sb)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 62) rec.inobt.ir_u.sp.ir_holemask = cpu_to_be16(irec->ir_holemask);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 63) rec.inobt.ir_u.sp.ir_count = irec->ir_count;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 64) rec.inobt.ir_u.sp.ir_freecount = irec->ir_freecount;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 65) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 66) /* ir_holemask/ir_count not supported on-disk */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 67) rec.inobt.ir_u.f.ir_freecount = cpu_to_be32(irec->ir_freecount);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 68) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 69) rec.inobt.ir_free = cpu_to_be64(irec->ir_free);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 70) return xfs_btree_update(cur, &rec);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 71) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 72)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 73) /* Convert on-disk btree record to incore inobt record. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 74) void
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 75) xfs_inobt_btrec_to_irec(
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 76) struct xfs_mount *mp,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 77) union xfs_btree_rec *rec,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 78) struct xfs_inobt_rec_incore *irec)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 79) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 80) irec->ir_startino = be32_to_cpu(rec->inobt.ir_startino);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 81) if (xfs_sb_version_hassparseinodes(&mp->m_sb)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 82) irec->ir_holemask = be16_to_cpu(rec->inobt.ir_u.sp.ir_holemask);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 83) irec->ir_count = rec->inobt.ir_u.sp.ir_count;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 84) irec->ir_freecount = rec->inobt.ir_u.sp.ir_freecount;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 85) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 86) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 87) * ir_holemask/ir_count not supported on-disk. Fill in hardcoded
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 88) * values for full inode chunks.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 89) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 90) irec->ir_holemask = XFS_INOBT_HOLEMASK_FULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 91) irec->ir_count = XFS_INODES_PER_CHUNK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 92) irec->ir_freecount =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 93) be32_to_cpu(rec->inobt.ir_u.f.ir_freecount);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 94) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 95) irec->ir_free = be64_to_cpu(rec->inobt.ir_free);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 96) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 97)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 98) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 99) * Get the data from the pointed-to record.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 100) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 101) int
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 102) xfs_inobt_get_rec(
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 103) struct xfs_btree_cur *cur,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 104) struct xfs_inobt_rec_incore *irec,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 105) int *stat)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 106) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 107) struct xfs_mount *mp = cur->bc_mp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 108) xfs_agnumber_t agno = cur->bc_ag.agno;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 109) union xfs_btree_rec *rec;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 110) int error;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 111) uint64_t realfree;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 112)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 113) error = xfs_btree_get_rec(cur, &rec, stat);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 114) if (error || *stat == 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 115) return error;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 116)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 117) xfs_inobt_btrec_to_irec(mp, rec, irec);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 118)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 119) if (!xfs_verify_agino(mp, agno, irec->ir_startino))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 120) goto out_bad_rec;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 121) if (irec->ir_count < XFS_INODES_PER_HOLEMASK_BIT ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 122) irec->ir_count > XFS_INODES_PER_CHUNK)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 123) goto out_bad_rec;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 124) if (irec->ir_freecount > XFS_INODES_PER_CHUNK)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 125) goto out_bad_rec;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 126)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 127) /* if there are no holes, return the first available offset */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 128) if (!xfs_inobt_issparse(irec->ir_holemask))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 129) realfree = irec->ir_free;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 130) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 131) realfree = irec->ir_free & xfs_inobt_irec_to_allocmask(irec);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 132) if (hweight64(realfree) != irec->ir_freecount)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 133) goto out_bad_rec;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 134)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 135) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 136)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 137) out_bad_rec:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 138) xfs_warn(mp,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 139) "%s Inode BTree record corruption in AG %d detected!",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 140) cur->bc_btnum == XFS_BTNUM_INO ? "Used" : "Free", agno);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 141) xfs_warn(mp,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 142) "start inode 0x%x, count 0x%x, free 0x%x freemask 0x%llx, holemask 0x%x",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 143) irec->ir_startino, irec->ir_count, irec->ir_freecount,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 144) irec->ir_free, irec->ir_holemask);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 145) return -EFSCORRUPTED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 146) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 147)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 148) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 149) * Insert a single inobt record. Cursor must already point to desired location.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 150) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 151) int
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 152) xfs_inobt_insert_rec(
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 153) struct xfs_btree_cur *cur,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 154) uint16_t holemask,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 155) uint8_t count,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 156) int32_t freecount,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 157) xfs_inofree_t free,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 158) int *stat)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 159) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 160) cur->bc_rec.i.ir_holemask = holemask;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 161) cur->bc_rec.i.ir_count = count;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 162) cur->bc_rec.i.ir_freecount = freecount;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 163) cur->bc_rec.i.ir_free = free;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 164) return xfs_btree_insert(cur, stat);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 165) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 166)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 167) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 168) * Insert records describing a newly allocated inode chunk into the inobt.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 169) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 170) STATIC int
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 171) xfs_inobt_insert(
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 172) struct xfs_mount *mp,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 173) struct xfs_trans *tp,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 174) struct xfs_buf *agbp,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 175) xfs_agino_t newino,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 176) xfs_agino_t newlen,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 177) xfs_btnum_t btnum)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 178) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 179) struct xfs_btree_cur *cur;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 180) struct xfs_agi *agi = agbp->b_addr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 181) xfs_agnumber_t agno = be32_to_cpu(agi->agi_seqno);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 182) xfs_agino_t thisino;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 183) int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 184) int error;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 185)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 186) cur = xfs_inobt_init_cursor(mp, tp, agbp, agno, btnum);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 187)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 188) for (thisino = newino;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 189) thisino < newino + newlen;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 190) thisino += XFS_INODES_PER_CHUNK) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 191) error = xfs_inobt_lookup(cur, thisino, XFS_LOOKUP_EQ, &i);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 192) if (error) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 193) xfs_btree_del_cursor(cur, XFS_BTREE_ERROR);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 194) return error;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 195) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 196) ASSERT(i == 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 197)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 198) error = xfs_inobt_insert_rec(cur, XFS_INOBT_HOLEMASK_FULL,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 199) XFS_INODES_PER_CHUNK,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 200) XFS_INODES_PER_CHUNK,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 201) XFS_INOBT_ALL_FREE, &i);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 202) if (error) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 203) xfs_btree_del_cursor(cur, XFS_BTREE_ERROR);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 204) return error;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 205) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 206) ASSERT(i == 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 207) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 208)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 209) xfs_btree_del_cursor(cur, XFS_BTREE_NOERROR);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 210)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 211) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 212) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 213)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 214) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 215) * Verify that the number of free inodes in the AGI is correct.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 216) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 217) #ifdef DEBUG
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 218) STATIC int
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 219) xfs_check_agi_freecount(
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 220) struct xfs_btree_cur *cur,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 221) struct xfs_agi *agi)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 222) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 223) if (cur->bc_nlevels == 1) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 224) xfs_inobt_rec_incore_t rec;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 225) int freecount = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 226) int error;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 227) int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 228)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 229) error = xfs_inobt_lookup(cur, 0, XFS_LOOKUP_GE, &i);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 230) if (error)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 231) return error;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 232)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 233) do {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 234) error = xfs_inobt_get_rec(cur, &rec, &i);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 235) if (error)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 236) return error;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 237)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 238) if (i) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 239) freecount += rec.ir_freecount;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 240) error = xfs_btree_increment(cur, 0, &i);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 241) if (error)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 242) return error;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 243) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 244) } while (i == 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 245)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 246) if (!XFS_FORCED_SHUTDOWN(cur->bc_mp))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 247) ASSERT(freecount == be32_to_cpu(agi->agi_freecount));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 248) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 249) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 250) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 251) #else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 252) #define xfs_check_agi_freecount(cur, agi) 0
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 253) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 254)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 255) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 256) * Initialise a new set of inodes. When called without a transaction context
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 257) * (e.g. from recovery) we initiate a delayed write of the inode buffers rather
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 258) * than logging them (which in a transaction context puts them into the AIL
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 259) * for writeback rather than the xfsbufd queue).
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 260) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 261) int
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 262) xfs_ialloc_inode_init(
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 263) struct xfs_mount *mp,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 264) struct xfs_trans *tp,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 265) struct list_head *buffer_list,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 266) int icount,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 267) xfs_agnumber_t agno,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 268) xfs_agblock_t agbno,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 269) xfs_agblock_t length,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 270) unsigned int gen)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 271) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 272) struct xfs_buf *fbuf;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 273) struct xfs_dinode *free;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 274) int nbufs;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 275) int version;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 276) int i, j;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 277) xfs_daddr_t d;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 278) xfs_ino_t ino = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 279) int error;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 280)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 281) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 282) * Loop over the new block(s), filling in the inodes. For small block
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 283) * sizes, manipulate the inodes in buffers which are multiples of the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 284) * blocks size.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 285) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 286) nbufs = length / M_IGEO(mp)->blocks_per_cluster;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 287)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 288) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 289) * Figure out what version number to use in the inodes we create. If
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 290) * the superblock version has caught up to the one that supports the new
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 291) * inode format, then use the new inode version. Otherwise use the old
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 292) * version so that old kernels will continue to be able to use the file
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 293) * system.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 294) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 295) * For v3 inodes, we also need to write the inode number into the inode,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 296) * so calculate the first inode number of the chunk here as
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 297) * XFS_AGB_TO_AGINO() only works within a filesystem block, not
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 298) * across multiple filesystem blocks (such as a cluster) and so cannot
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 299) * be used in the cluster buffer loop below.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 300) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 301) * Further, because we are writing the inode directly into the buffer
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 302) * and calculating a CRC on the entire inode, we have ot log the entire
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 303) * inode so that the entire range the CRC covers is present in the log.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 304) * That means for v3 inode we log the entire buffer rather than just the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 305) * inode cores.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 306) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 307) if (xfs_sb_version_has_v3inode(&mp->m_sb)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 308) version = 3;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 309) ino = XFS_AGINO_TO_INO(mp, agno, XFS_AGB_TO_AGINO(mp, agbno));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 310)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 311) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 312) * log the initialisation that is about to take place as an
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 313) * logical operation. This means the transaction does not
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 314) * need to log the physical changes to the inode buffers as log
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 315) * recovery will know what initialisation is actually needed.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 316) * Hence we only need to log the buffers as "ordered" buffers so
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 317) * they track in the AIL as if they were physically logged.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 318) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 319) if (tp)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 320) xfs_icreate_log(tp, agno, agbno, icount,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 321) mp->m_sb.sb_inodesize, length, gen);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 322) } else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 323) version = 2;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 324)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 325) for (j = 0; j < nbufs; j++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 326) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 327) * Get the block.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 328) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 329) d = XFS_AGB_TO_DADDR(mp, agno, agbno +
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 330) (j * M_IGEO(mp)->blocks_per_cluster));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 331) error = xfs_trans_get_buf(tp, mp->m_ddev_targp, d,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 332) mp->m_bsize * M_IGEO(mp)->blocks_per_cluster,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 333) XBF_UNMAPPED, &fbuf);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 334) if (error)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 335) return error;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 336)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 337) /* Initialize the inode buffers and log them appropriately. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 338) fbuf->b_ops = &xfs_inode_buf_ops;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 339) xfs_buf_zero(fbuf, 0, BBTOB(fbuf->b_length));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 340) for (i = 0; i < M_IGEO(mp)->inodes_per_cluster; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 341) int ioffset = i << mp->m_sb.sb_inodelog;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 342) uint isize = XFS_DINODE_SIZE(&mp->m_sb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 343)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 344) free = xfs_make_iptr(mp, fbuf, i);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 345) free->di_magic = cpu_to_be16(XFS_DINODE_MAGIC);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 346) free->di_version = version;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 347) free->di_gen = cpu_to_be32(gen);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 348) free->di_next_unlinked = cpu_to_be32(NULLAGINO);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 349)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 350) if (version == 3) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 351) free->di_ino = cpu_to_be64(ino);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 352) ino++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 353) uuid_copy(&free->di_uuid,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 354) &mp->m_sb.sb_meta_uuid);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 355) xfs_dinode_calc_crc(mp, free);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 356) } else if (tp) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 357) /* just log the inode core */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 358) xfs_trans_log_buf(tp, fbuf, ioffset,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 359) ioffset + isize - 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 360) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 361) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 362)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 363) if (tp) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 364) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 365) * Mark the buffer as an inode allocation buffer so it
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 366) * sticks in AIL at the point of this allocation
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 367) * transaction. This ensures the they are on disk before
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 368) * the tail of the log can be moved past this
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 369) * transaction (i.e. by preventing relogging from moving
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 370) * it forward in the log).
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 371) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 372) xfs_trans_inode_alloc_buf(tp, fbuf);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 373) if (version == 3) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 374) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 375) * Mark the buffer as ordered so that they are
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 376) * not physically logged in the transaction but
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 377) * still tracked in the AIL as part of the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 378) * transaction and pin the log appropriately.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 379) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 380) xfs_trans_ordered_buf(tp, fbuf);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 381) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 382) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 383) fbuf->b_flags |= XBF_DONE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 384) xfs_buf_delwri_queue(fbuf, buffer_list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 385) xfs_buf_relse(fbuf);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 386) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 387) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 388) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 389) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 390)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 391) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 392) * Align startino and allocmask for a recently allocated sparse chunk such that
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 393) * they are fit for insertion (or merge) into the on-disk inode btrees.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 394) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 395) * Background:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 396) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 397) * When enabled, sparse inode support increases the inode alignment from cluster
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 398) * size to inode chunk size. This means that the minimum range between two
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 399) * non-adjacent inode records in the inobt is large enough for a full inode
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 400) * record. This allows for cluster sized, cluster aligned block allocation
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 401) * without need to worry about whether the resulting inode record overlaps with
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 402) * another record in the tree. Without this basic rule, we would have to deal
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 403) * with the consequences of overlap by potentially undoing recent allocations in
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 404) * the inode allocation codepath.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 405) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 406) * Because of this alignment rule (which is enforced on mount), there are two
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 407) * inobt possibilities for newly allocated sparse chunks. One is that the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 408) * aligned inode record for the chunk covers a range of inodes not already
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 409) * covered in the inobt (i.e., it is safe to insert a new sparse record). The
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 410) * other is that a record already exists at the aligned startino that considers
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 411) * the newly allocated range as sparse. In the latter case, record content is
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 412) * merged in hope that sparse inode chunks fill to full chunks over time.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 413) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 414) STATIC void
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 415) xfs_align_sparse_ino(
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 416) struct xfs_mount *mp,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 417) xfs_agino_t *startino,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 418) uint16_t *allocmask)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 419) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 420) xfs_agblock_t agbno;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 421) xfs_agblock_t mod;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 422) int offset;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 423)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 424) agbno = XFS_AGINO_TO_AGBNO(mp, *startino);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 425) mod = agbno % mp->m_sb.sb_inoalignmt;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 426) if (!mod)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 427) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 428)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 429) /* calculate the inode offset and align startino */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 430) offset = XFS_AGB_TO_AGINO(mp, mod);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 431) *startino -= offset;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 432)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 433) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 434) * Since startino has been aligned down, left shift allocmask such that
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 435) * it continues to represent the same physical inodes relative to the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 436) * new startino.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 437) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 438) *allocmask <<= offset / XFS_INODES_PER_HOLEMASK_BIT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 439) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 440)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 441) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 442) * Determine whether the source inode record can merge into the target. Both
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 443) * records must be sparse, the inode ranges must match and there must be no
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 444) * allocation overlap between the records.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 445) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 446) STATIC bool
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 447) __xfs_inobt_can_merge(
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 448) struct xfs_inobt_rec_incore *trec, /* tgt record */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 449) struct xfs_inobt_rec_incore *srec) /* src record */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 450) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 451) uint64_t talloc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 452) uint64_t salloc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 453)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 454) /* records must cover the same inode range */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 455) if (trec->ir_startino != srec->ir_startino)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 456) return false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 457)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 458) /* both records must be sparse */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 459) if (!xfs_inobt_issparse(trec->ir_holemask) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 460) !xfs_inobt_issparse(srec->ir_holemask))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 461) return false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 462)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 463) /* both records must track some inodes */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 464) if (!trec->ir_count || !srec->ir_count)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 465) return false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 466)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 467) /* can't exceed capacity of a full record */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 468) if (trec->ir_count + srec->ir_count > XFS_INODES_PER_CHUNK)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 469) return false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 470)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 471) /* verify there is no allocation overlap */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 472) talloc = xfs_inobt_irec_to_allocmask(trec);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 473) salloc = xfs_inobt_irec_to_allocmask(srec);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 474) if (talloc & salloc)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 475) return false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 476)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 477) return true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 478) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 479)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 480) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 481) * Merge the source inode record into the target. The caller must call
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 482) * __xfs_inobt_can_merge() to ensure the merge is valid.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 483) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 484) STATIC void
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 485) __xfs_inobt_rec_merge(
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 486) struct xfs_inobt_rec_incore *trec, /* target */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 487) struct xfs_inobt_rec_incore *srec) /* src */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 488) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 489) ASSERT(trec->ir_startino == srec->ir_startino);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 490)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 491) /* combine the counts */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 492) trec->ir_count += srec->ir_count;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 493) trec->ir_freecount += srec->ir_freecount;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 494)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 495) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 496) * Merge the holemask and free mask. For both fields, 0 bits refer to
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 497) * allocated inodes. We combine the allocated ranges with bitwise AND.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 498) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 499) trec->ir_holemask &= srec->ir_holemask;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 500) trec->ir_free &= srec->ir_free;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 501) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 502)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 503) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 504) * Insert a new sparse inode chunk into the associated inode btree. The inode
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 505) * record for the sparse chunk is pre-aligned to a startino that should match
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 506) * any pre-existing sparse inode record in the tree. This allows sparse chunks
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 507) * to fill over time.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 508) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 509) * This function supports two modes of handling preexisting records depending on
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 510) * the merge flag. If merge is true, the provided record is merged with the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 511) * existing record and updated in place. The merged record is returned in nrec.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 512) * If merge is false, an existing record is replaced with the provided record.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 513) * If no preexisting record exists, the provided record is always inserted.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 514) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 515) * It is considered corruption if a merge is requested and not possible. Given
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 516) * the sparse inode alignment constraints, this should never happen.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 517) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 518) STATIC int
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 519) xfs_inobt_insert_sprec(
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 520) struct xfs_mount *mp,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 521) struct xfs_trans *tp,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 522) struct xfs_buf *agbp,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 523) int btnum,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 524) struct xfs_inobt_rec_incore *nrec, /* in/out: new/merged rec. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 525) bool merge) /* merge or replace */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 526) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 527) struct xfs_btree_cur *cur;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 528) struct xfs_agi *agi = agbp->b_addr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 529) xfs_agnumber_t agno = be32_to_cpu(agi->agi_seqno);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 530) int error;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 531) int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 532) struct xfs_inobt_rec_incore rec;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 533)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 534) cur = xfs_inobt_init_cursor(mp, tp, agbp, agno, btnum);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 535)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 536) /* the new record is pre-aligned so we know where to look */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 537) error = xfs_inobt_lookup(cur, nrec->ir_startino, XFS_LOOKUP_EQ, &i);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 538) if (error)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 539) goto error;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 540) /* if nothing there, insert a new record and return */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 541) if (i == 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 542) error = xfs_inobt_insert_rec(cur, nrec->ir_holemask,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 543) nrec->ir_count, nrec->ir_freecount,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 544) nrec->ir_free, &i);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 545) if (error)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 546) goto error;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 547) if (XFS_IS_CORRUPT(mp, i != 1)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 548) error = -EFSCORRUPTED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 549) goto error;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 550) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 551)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 552) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 553) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 554)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 555) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 556) * A record exists at this startino. Merge or replace the record
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 557) * depending on what we've been asked to do.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 558) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 559) if (merge) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 560) error = xfs_inobt_get_rec(cur, &rec, &i);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 561) if (error)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 562) goto error;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 563) if (XFS_IS_CORRUPT(mp, i != 1)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 564) error = -EFSCORRUPTED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 565) goto error;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 566) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 567) if (XFS_IS_CORRUPT(mp, rec.ir_startino != nrec->ir_startino)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 568) error = -EFSCORRUPTED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 569) goto error;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 570) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 571)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 572) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 573) * This should never fail. If we have coexisting records that
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 574) * cannot merge, something is seriously wrong.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 575) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 576) if (XFS_IS_CORRUPT(mp, !__xfs_inobt_can_merge(nrec, &rec))) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 577) error = -EFSCORRUPTED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 578) goto error;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 579) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 580)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 581) trace_xfs_irec_merge_pre(mp, agno, rec.ir_startino,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 582) rec.ir_holemask, nrec->ir_startino,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 583) nrec->ir_holemask);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 584)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 585) /* merge to nrec to output the updated record */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 586) __xfs_inobt_rec_merge(nrec, &rec);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 587)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 588) trace_xfs_irec_merge_post(mp, agno, nrec->ir_startino,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 589) nrec->ir_holemask);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 590)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 591) error = xfs_inobt_rec_check_count(mp, nrec);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 592) if (error)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 593) goto error;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 594) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 595)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 596) error = xfs_inobt_update(cur, nrec);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 597) if (error)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 598) goto error;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 599)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 600) out:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 601) xfs_btree_del_cursor(cur, XFS_BTREE_NOERROR);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 602) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 603) error:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 604) xfs_btree_del_cursor(cur, XFS_BTREE_ERROR);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 605) return error;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 606) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 607)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 608) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 609) * Allocate new inodes in the allocation group specified by agbp.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 610) * Return 0 for success, else error code.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 611) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 612) STATIC int
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 613) xfs_ialloc_ag_alloc(
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 614) struct xfs_trans *tp,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 615) struct xfs_buf *agbp,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 616) int *alloc)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 617) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 618) struct xfs_agi *agi;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 619) struct xfs_alloc_arg args;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 620) xfs_agnumber_t agno;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 621) int error;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 622) xfs_agino_t newino; /* new first inode's number */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 623) xfs_agino_t newlen; /* new number of inodes */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 624) int isaligned = 0; /* inode allocation at stripe */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 625) /* unit boundary */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 626) /* init. to full chunk */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 627) uint16_t allocmask = (uint16_t) -1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 628) struct xfs_inobt_rec_incore rec;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 629) struct xfs_perag *pag;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 630) struct xfs_ino_geometry *igeo = M_IGEO(tp->t_mountp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 631) int do_sparse = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 632)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 633) memset(&args, 0, sizeof(args));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 634) args.tp = tp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 635) args.mp = tp->t_mountp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 636) args.fsbno = NULLFSBLOCK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 637) args.oinfo = XFS_RMAP_OINFO_INODES;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 638)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 639) #ifdef DEBUG
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 640) /* randomly do sparse inode allocations */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 641) if (xfs_sb_version_hassparseinodes(&tp->t_mountp->m_sb) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 642) igeo->ialloc_min_blks < igeo->ialloc_blks)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 643) do_sparse = prandom_u32() & 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 644) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 645)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 646) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 647) * Locking will ensure that we don't have two callers in here
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 648) * at one time.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 649) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 650) newlen = igeo->ialloc_inos;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 651) if (igeo->maxicount &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 652) percpu_counter_read_positive(&args.mp->m_icount) + newlen >
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 653) igeo->maxicount)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 654) return -ENOSPC;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 655) args.minlen = args.maxlen = igeo->ialloc_blks;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 656) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 657) * First try to allocate inodes contiguous with the last-allocated
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 658) * chunk of inodes. If the filesystem is striped, this will fill
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 659) * an entire stripe unit with inodes.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 660) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 661) agi = agbp->b_addr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 662) newino = be32_to_cpu(agi->agi_newino);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 663) agno = be32_to_cpu(agi->agi_seqno);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 664) args.agbno = XFS_AGINO_TO_AGBNO(args.mp, newino) +
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 665) igeo->ialloc_blks;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 666) if (do_sparse)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 667) goto sparse_alloc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 668) if (likely(newino != NULLAGINO &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 669) (args.agbno < be32_to_cpu(agi->agi_length)))) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 670) args.fsbno = XFS_AGB_TO_FSB(args.mp, agno, args.agbno);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 671) args.type = XFS_ALLOCTYPE_THIS_BNO;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 672) args.prod = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 673)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 674) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 675) * We need to take into account alignment here to ensure that
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 676) * we don't modify the free list if we fail to have an exact
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 677) * block. If we don't have an exact match, and every oher
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 678) * attempt allocation attempt fails, we'll end up cancelling
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 679) * a dirty transaction and shutting down.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 680) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 681) * For an exact allocation, alignment must be 1,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 682) * however we need to take cluster alignment into account when
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 683) * fixing up the freelist. Use the minalignslop field to
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 684) * indicate that extra blocks might be required for alignment,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 685) * but not to use them in the actual exact allocation.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 686) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 687) args.alignment = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 688) args.minalignslop = igeo->cluster_align - 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 689)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 690) /* Allow space for the inode btree to split. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 691) args.minleft = igeo->inobt_maxlevels;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 692) if ((error = xfs_alloc_vextent(&args)))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 693) return error;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 694)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 695) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 696) * This request might have dirtied the transaction if the AG can
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 697) * satisfy the request, but the exact block was not available.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 698) * If the allocation did fail, subsequent requests will relax
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 699) * the exact agbno requirement and increase the alignment
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 700) * instead. It is critical that the total size of the request
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 701) * (len + alignment + slop) does not increase from this point
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 702) * on, so reset minalignslop to ensure it is not included in
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 703) * subsequent requests.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 704) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 705) args.minalignslop = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 706) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 707)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 708) if (unlikely(args.fsbno == NULLFSBLOCK)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 709) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 710) * Set the alignment for the allocation.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 711) * If stripe alignment is turned on then align at stripe unit
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 712) * boundary.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 713) * If the cluster size is smaller than a filesystem block
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 714) * then we're doing I/O for inodes in filesystem block size
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 715) * pieces, so don't need alignment anyway.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 716) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 717) isaligned = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 718) if (igeo->ialloc_align) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 719) ASSERT(!(args.mp->m_flags & XFS_MOUNT_NOALIGN));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 720) args.alignment = args.mp->m_dalign;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 721) isaligned = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 722) } else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 723) args.alignment = igeo->cluster_align;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 724) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 725) * Need to figure out where to allocate the inode blocks.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 726) * Ideally they should be spaced out through the a.g.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 727) * For now, just allocate blocks up front.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 728) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 729) args.agbno = be32_to_cpu(agi->agi_root);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 730) args.fsbno = XFS_AGB_TO_FSB(args.mp, agno, args.agbno);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 731) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 732) * Allocate a fixed-size extent of inodes.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 733) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 734) args.type = XFS_ALLOCTYPE_NEAR_BNO;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 735) args.prod = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 736) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 737) * Allow space for the inode btree to split.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 738) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 739) args.minleft = igeo->inobt_maxlevels;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 740) if ((error = xfs_alloc_vextent(&args)))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 741) return error;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 742) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 743)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 744) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 745) * If stripe alignment is turned on, then try again with cluster
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 746) * alignment.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 747) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 748) if (isaligned && args.fsbno == NULLFSBLOCK) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 749) args.type = XFS_ALLOCTYPE_NEAR_BNO;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 750) args.agbno = be32_to_cpu(agi->agi_root);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 751) args.fsbno = XFS_AGB_TO_FSB(args.mp, agno, args.agbno);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 752) args.alignment = igeo->cluster_align;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 753) if ((error = xfs_alloc_vextent(&args)))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 754) return error;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 755) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 756)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 757) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 758) * Finally, try a sparse allocation if the filesystem supports it and
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 759) * the sparse allocation length is smaller than a full chunk.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 760) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 761) if (xfs_sb_version_hassparseinodes(&args.mp->m_sb) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 762) igeo->ialloc_min_blks < igeo->ialloc_blks &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 763) args.fsbno == NULLFSBLOCK) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 764) sparse_alloc:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 765) args.type = XFS_ALLOCTYPE_NEAR_BNO;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 766) args.agbno = be32_to_cpu(agi->agi_root);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 767) args.fsbno = XFS_AGB_TO_FSB(args.mp, agno, args.agbno);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 768) args.alignment = args.mp->m_sb.sb_spino_align;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 769) args.prod = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 770)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 771) args.minlen = igeo->ialloc_min_blks;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 772) args.maxlen = args.minlen;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 773)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 774) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 775) * The inode record will be aligned to full chunk size. We must
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 776) * prevent sparse allocation from AG boundaries that result in
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 777) * invalid inode records, such as records that start at agbno 0
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 778) * or extend beyond the AG.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 779) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 780) * Set min agbno to the first aligned, non-zero agbno and max to
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 781) * the last aligned agbno that is at least one full chunk from
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 782) * the end of the AG.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 783) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 784) args.min_agbno = args.mp->m_sb.sb_inoalignmt;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 785) args.max_agbno = round_down(args.mp->m_sb.sb_agblocks,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 786) args.mp->m_sb.sb_inoalignmt) -
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 787) igeo->ialloc_blks;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 788)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 789) error = xfs_alloc_vextent(&args);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 790) if (error)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 791) return error;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 792)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 793) newlen = XFS_AGB_TO_AGINO(args.mp, args.len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 794) ASSERT(newlen <= XFS_INODES_PER_CHUNK);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 795) allocmask = (1 << (newlen / XFS_INODES_PER_HOLEMASK_BIT)) - 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 796) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 797)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 798) if (args.fsbno == NULLFSBLOCK) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 799) *alloc = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 800) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 801) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 802) ASSERT(args.len == args.minlen);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 803)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 804) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 805) * Stamp and write the inode buffers.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 806) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 807) * Seed the new inode cluster with a random generation number. This
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 808) * prevents short-term reuse of generation numbers if a chunk is
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 809) * freed and then immediately reallocated. We use random numbers
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 810) * rather than a linear progression to prevent the next generation
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 811) * number from being easily guessable.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 812) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 813) error = xfs_ialloc_inode_init(args.mp, tp, NULL, newlen, agno,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 814) args.agbno, args.len, prandom_u32());
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 815)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 816) if (error)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 817) return error;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 818) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 819) * Convert the results.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 820) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 821) newino = XFS_AGB_TO_AGINO(args.mp, args.agbno);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 822)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 823) if (xfs_inobt_issparse(~allocmask)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 824) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 825) * We've allocated a sparse chunk. Align the startino and mask.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 826) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 827) xfs_align_sparse_ino(args.mp, &newino, &allocmask);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 828)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 829) rec.ir_startino = newino;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 830) rec.ir_holemask = ~allocmask;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 831) rec.ir_count = newlen;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 832) rec.ir_freecount = newlen;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 833) rec.ir_free = XFS_INOBT_ALL_FREE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 834)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 835) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 836) * Insert the sparse record into the inobt and allow for a merge
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 837) * if necessary. If a merge does occur, rec is updated to the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 838) * merged record.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 839) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 840) error = xfs_inobt_insert_sprec(args.mp, tp, agbp, XFS_BTNUM_INO,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 841) &rec, true);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 842) if (error == -EFSCORRUPTED) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 843) xfs_alert(args.mp,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 844) "invalid sparse inode record: ino 0x%llx holemask 0x%x count %u",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 845) XFS_AGINO_TO_INO(args.mp, agno,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 846) rec.ir_startino),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 847) rec.ir_holemask, rec.ir_count);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 848) xfs_force_shutdown(args.mp, SHUTDOWN_CORRUPT_INCORE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 849) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 850) if (error)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 851) return error;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 852)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 853) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 854) * We can't merge the part we've just allocated as for the inobt
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 855) * due to finobt semantics. The original record may or may not
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 856) * exist independent of whether physical inodes exist in this
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 857) * sparse chunk.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 858) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 859) * We must update the finobt record based on the inobt record.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 860) * rec contains the fully merged and up to date inobt record
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 861) * from the previous call. Set merge false to replace any
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 862) * existing record with this one.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 863) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 864) if (xfs_sb_version_hasfinobt(&args.mp->m_sb)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 865) error = xfs_inobt_insert_sprec(args.mp, tp, agbp,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 866) XFS_BTNUM_FINO, &rec,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 867) false);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 868) if (error)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 869) return error;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 870) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 871) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 872) /* full chunk - insert new records to both btrees */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 873) error = xfs_inobt_insert(args.mp, tp, agbp, newino, newlen,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 874) XFS_BTNUM_INO);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 875) if (error)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 876) return error;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 877)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 878) if (xfs_sb_version_hasfinobt(&args.mp->m_sb)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 879) error = xfs_inobt_insert(args.mp, tp, agbp, newino,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 880) newlen, XFS_BTNUM_FINO);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 881) if (error)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 882) return error;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 883) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 884) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 885)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 886) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 887) * Update AGI counts and newino.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 888) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 889) be32_add_cpu(&agi->agi_count, newlen);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 890) be32_add_cpu(&agi->agi_freecount, newlen);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 891) pag = agbp->b_pag;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 892) pag->pagi_freecount += newlen;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 893) pag->pagi_count += newlen;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 894) agi->agi_newino = cpu_to_be32(newino);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 895)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 896) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 897) * Log allocation group header fields
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 898) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 899) xfs_ialloc_log_agi(tp, agbp,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 900) XFS_AGI_COUNT | XFS_AGI_FREECOUNT | XFS_AGI_NEWINO);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 901) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 902) * Modify/log superblock values for inode count and inode free count.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 903) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 904) xfs_trans_mod_sb(tp, XFS_TRANS_SB_ICOUNT, (long)newlen);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 905) xfs_trans_mod_sb(tp, XFS_TRANS_SB_IFREE, (long)newlen);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 906) *alloc = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 907) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 908) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 909)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 910) STATIC xfs_agnumber_t
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 911) xfs_ialloc_next_ag(
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 912) xfs_mount_t *mp)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 913) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 914) xfs_agnumber_t agno;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 915)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 916) spin_lock(&mp->m_agirotor_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 917) agno = mp->m_agirotor;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 918) if (++mp->m_agirotor >= mp->m_maxagi)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 919) mp->m_agirotor = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 920) spin_unlock(&mp->m_agirotor_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 921)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 922) return agno;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 923) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 924)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 925) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 926) * Select an allocation group to look for a free inode in, based on the parent
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 927) * inode and the mode. Return the allocation group buffer.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 928) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 929) STATIC xfs_agnumber_t
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 930) xfs_ialloc_ag_select(
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 931) xfs_trans_t *tp, /* transaction pointer */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 932) xfs_ino_t parent, /* parent directory inode number */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 933) umode_t mode) /* bits set to indicate file type */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 934) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 935) xfs_agnumber_t agcount; /* number of ag's in the filesystem */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 936) xfs_agnumber_t agno; /* current ag number */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 937) int flags; /* alloc buffer locking flags */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 938) xfs_extlen_t ineed; /* blocks needed for inode allocation */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 939) xfs_extlen_t longest = 0; /* longest extent available */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 940) xfs_mount_t *mp; /* mount point structure */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 941) int needspace; /* file mode implies space allocated */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 942) xfs_perag_t *pag; /* per allocation group data */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 943) xfs_agnumber_t pagno; /* parent (starting) ag number */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 944) int error;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 945)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 946) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 947) * Files of these types need at least one block if length > 0
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 948) * (and they won't fit in the inode, but that's hard to figure out).
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 949) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 950) needspace = S_ISDIR(mode) || S_ISREG(mode) || S_ISLNK(mode);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 951) mp = tp->t_mountp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 952) agcount = mp->m_maxagi;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 953) if (S_ISDIR(mode))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 954) pagno = xfs_ialloc_next_ag(mp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 955) else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 956) pagno = XFS_INO_TO_AGNO(mp, parent);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 957) if (pagno >= agcount)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 958) pagno = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 959) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 960)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 961) ASSERT(pagno < agcount);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 962)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 963) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 964) * Loop through allocation groups, looking for one with a little
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 965) * free space in it. Note we don't look for free inodes, exactly.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 966) * Instead, we include whether there is a need to allocate inodes
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 967) * to mean that blocks must be allocated for them,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 968) * if none are currently free.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 969) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 970) agno = pagno;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 971) flags = XFS_ALLOC_FLAG_TRYLOCK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 972) for (;;) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 973) pag = xfs_perag_get(mp, agno);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 974) if (!pag->pagi_inodeok) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 975) xfs_ialloc_next_ag(mp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 976) goto nextag;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 977) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 978)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 979) if (!pag->pagi_init) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 980) error = xfs_ialloc_pagi_init(mp, tp, agno);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 981) if (error)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 982) goto nextag;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 983) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 984)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 985) if (pag->pagi_freecount) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 986) xfs_perag_put(pag);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 987) return agno;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 988) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 989)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 990) if (!pag->pagf_init) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 991) error = xfs_alloc_pagf_init(mp, tp, agno, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 992) if (error)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 993) goto nextag;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 994) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 995)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 996) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 997) * Check that there is enough free space for the file plus a
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 998) * chunk of inodes if we need to allocate some. If this is the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 999) * first pass across the AGs, take into account the potential
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1000) * space needed for alignment of inode chunks when checking the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1001) * longest contiguous free space in the AG - this prevents us
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1002) * from getting ENOSPC because we have free space larger than
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1003) * ialloc_blks but alignment constraints prevent us from using
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1004) * it.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1005) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1006) * If we can't find an AG with space for full alignment slack to
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1007) * be taken into account, we must be near ENOSPC in all AGs.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1008) * Hence we don't include alignment for the second pass and so
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1009) * if we fail allocation due to alignment issues then it is most
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1010) * likely a real ENOSPC condition.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1011) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1012) ineed = M_IGEO(mp)->ialloc_min_blks;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1013) if (flags && ineed > 1)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1014) ineed += M_IGEO(mp)->cluster_align;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1015) longest = pag->pagf_longest;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1016) if (!longest)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1017) longest = pag->pagf_flcount > 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1018)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1019) if (pag->pagf_freeblks >= needspace + ineed &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1020) longest >= ineed) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1021) xfs_perag_put(pag);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1022) return agno;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1023) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1024) nextag:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1025) xfs_perag_put(pag);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1026) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1027) * No point in iterating over the rest, if we're shutting
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1028) * down.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1029) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1030) if (XFS_FORCED_SHUTDOWN(mp))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1031) return NULLAGNUMBER;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1032) agno++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1033) if (agno >= agcount)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1034) agno = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1035) if (agno == pagno) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1036) if (flags == 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1037) return NULLAGNUMBER;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1038) flags = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1039) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1040) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1041) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1042)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1043) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1044) * Try to retrieve the next record to the left/right from the current one.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1045) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1046) STATIC int
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1047) xfs_ialloc_next_rec(
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1048) struct xfs_btree_cur *cur,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1049) xfs_inobt_rec_incore_t *rec,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1050) int *done,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1051) int left)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1052) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1053) int error;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1054) int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1055)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1056) if (left)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1057) error = xfs_btree_decrement(cur, 0, &i);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1058) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1059) error = xfs_btree_increment(cur, 0, &i);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1060)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1061) if (error)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1062) return error;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1063) *done = !i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1064) if (i) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1065) error = xfs_inobt_get_rec(cur, rec, &i);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1066) if (error)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1067) return error;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1068) if (XFS_IS_CORRUPT(cur->bc_mp, i != 1))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1069) return -EFSCORRUPTED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1070) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1071)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1072) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1073) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1074)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1075) STATIC int
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1076) xfs_ialloc_get_rec(
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1077) struct xfs_btree_cur *cur,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1078) xfs_agino_t agino,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1079) xfs_inobt_rec_incore_t *rec,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1080) int *done)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1081) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1082) int error;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1083) int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1084)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1085) error = xfs_inobt_lookup(cur, agino, XFS_LOOKUP_EQ, &i);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1086) if (error)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1087) return error;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1088) *done = !i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1089) if (i) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1090) error = xfs_inobt_get_rec(cur, rec, &i);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1091) if (error)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1092) return error;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1093) if (XFS_IS_CORRUPT(cur->bc_mp, i != 1))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1094) return -EFSCORRUPTED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1095) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1096)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1097) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1098) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1099)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1100) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1101) * Return the offset of the first free inode in the record. If the inode chunk
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1102) * is sparsely allocated, we convert the record holemask to inode granularity
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1103) * and mask off the unallocated regions from the inode free mask.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1104) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1105) STATIC int
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1106) xfs_inobt_first_free_inode(
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1107) struct xfs_inobt_rec_incore *rec)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1108) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1109) xfs_inofree_t realfree;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1110)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1111) /* if there are no holes, return the first available offset */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1112) if (!xfs_inobt_issparse(rec->ir_holemask))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1113) return xfs_lowbit64(rec->ir_free);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1114)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1115) realfree = xfs_inobt_irec_to_allocmask(rec);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1116) realfree &= rec->ir_free;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1117)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1118) return xfs_lowbit64(realfree);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1119) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1120)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1121) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1122) * Allocate an inode using the inobt-only algorithm.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1123) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1124) STATIC int
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1125) xfs_dialloc_ag_inobt(
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1126) struct xfs_trans *tp,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1127) struct xfs_buf *agbp,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1128) xfs_ino_t parent,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1129) xfs_ino_t *inop)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1130) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1131) struct xfs_mount *mp = tp->t_mountp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1132) struct xfs_agi *agi = agbp->b_addr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1133) xfs_agnumber_t agno = be32_to_cpu(agi->agi_seqno);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1134) xfs_agnumber_t pagno = XFS_INO_TO_AGNO(mp, parent);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1135) xfs_agino_t pagino = XFS_INO_TO_AGINO(mp, parent);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1136) struct xfs_perag *pag = agbp->b_pag;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1137) struct xfs_btree_cur *cur, *tcur;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1138) struct xfs_inobt_rec_incore rec, trec;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1139) xfs_ino_t ino;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1140) int error;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1141) int offset;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1142) int i, j;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1143) int searchdistance = 10;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1144)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1145) ASSERT(pag->pagi_init);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1146) ASSERT(pag->pagi_inodeok);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1147) ASSERT(pag->pagi_freecount > 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1148)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1149) restart_pagno:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1150) cur = xfs_inobt_init_cursor(mp, tp, agbp, agno, XFS_BTNUM_INO);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1151) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1152) * If pagino is 0 (this is the root inode allocation) use newino.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1153) * This must work because we've just allocated some.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1154) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1155) if (!pagino)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1156) pagino = be32_to_cpu(agi->agi_newino);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1157)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1158) error = xfs_check_agi_freecount(cur, agi);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1159) if (error)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1160) goto error0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1161)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1162) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1163) * If in the same AG as the parent, try to get near the parent.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1164) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1165) if (pagno == agno) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1166) int doneleft; /* done, to the left */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1167) int doneright; /* done, to the right */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1168)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1169) error = xfs_inobt_lookup(cur, pagino, XFS_LOOKUP_LE, &i);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1170) if (error)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1171) goto error0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1172) if (XFS_IS_CORRUPT(mp, i != 1)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1173) error = -EFSCORRUPTED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1174) goto error0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1175) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1176)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1177) error = xfs_inobt_get_rec(cur, &rec, &j);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1178) if (error)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1179) goto error0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1180) if (XFS_IS_CORRUPT(mp, j != 1)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1181) error = -EFSCORRUPTED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1182) goto error0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1183) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1184)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1185) if (rec.ir_freecount > 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1186) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1187) * Found a free inode in the same chunk
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1188) * as the parent, done.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1189) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1190) goto alloc_inode;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1191) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1192)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1193)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1194) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1195) * In the same AG as parent, but parent's chunk is full.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1196) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1197)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1198) /* duplicate the cursor, search left & right simultaneously */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1199) error = xfs_btree_dup_cursor(cur, &tcur);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1200) if (error)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1201) goto error0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1202)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1203) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1204) * Skip to last blocks looked up if same parent inode.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1205) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1206) if (pagino != NULLAGINO &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1207) pag->pagl_pagino == pagino &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1208) pag->pagl_leftrec != NULLAGINO &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1209) pag->pagl_rightrec != NULLAGINO) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1210) error = xfs_ialloc_get_rec(tcur, pag->pagl_leftrec,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1211) &trec, &doneleft);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1212) if (error)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1213) goto error1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1214)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1215) error = xfs_ialloc_get_rec(cur, pag->pagl_rightrec,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1216) &rec, &doneright);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1217) if (error)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1218) goto error1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1219) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1220) /* search left with tcur, back up 1 record */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1221) error = xfs_ialloc_next_rec(tcur, &trec, &doneleft, 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1222) if (error)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1223) goto error1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1224)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1225) /* search right with cur, go forward 1 record. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1226) error = xfs_ialloc_next_rec(cur, &rec, &doneright, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1227) if (error)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1228) goto error1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1229) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1230)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1231) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1232) * Loop until we find an inode chunk with a free inode.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1233) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1234) while (--searchdistance > 0 && (!doneleft || !doneright)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1235) int useleft; /* using left inode chunk this time */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1236)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1237) /* figure out the closer block if both are valid. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1238) if (!doneleft && !doneright) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1239) useleft = pagino -
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1240) (trec.ir_startino + XFS_INODES_PER_CHUNK - 1) <
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1241) rec.ir_startino - pagino;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1242) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1243) useleft = !doneleft;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1244) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1245)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1246) /* free inodes to the left? */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1247) if (useleft && trec.ir_freecount) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1248) xfs_btree_del_cursor(cur, XFS_BTREE_NOERROR);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1249) cur = tcur;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1250)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1251) pag->pagl_leftrec = trec.ir_startino;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1252) pag->pagl_rightrec = rec.ir_startino;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1253) pag->pagl_pagino = pagino;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1254) rec = trec;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1255) goto alloc_inode;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1256) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1257)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1258) /* free inodes to the right? */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1259) if (!useleft && rec.ir_freecount) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1260) xfs_btree_del_cursor(tcur, XFS_BTREE_NOERROR);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1261)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1262) pag->pagl_leftrec = trec.ir_startino;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1263) pag->pagl_rightrec = rec.ir_startino;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1264) pag->pagl_pagino = pagino;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1265) goto alloc_inode;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1266) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1267)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1268) /* get next record to check */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1269) if (useleft) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1270) error = xfs_ialloc_next_rec(tcur, &trec,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1271) &doneleft, 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1272) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1273) error = xfs_ialloc_next_rec(cur, &rec,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1274) &doneright, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1275) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1276) if (error)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1277) goto error1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1278) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1279)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1280) if (searchdistance <= 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1281) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1282) * Not in range - save last search
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1283) * location and allocate a new inode
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1284) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1285) xfs_btree_del_cursor(tcur, XFS_BTREE_NOERROR);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1286) pag->pagl_leftrec = trec.ir_startino;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1287) pag->pagl_rightrec = rec.ir_startino;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1288) pag->pagl_pagino = pagino;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1289)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1290) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1291) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1292) * We've reached the end of the btree. because
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1293) * we are only searching a small chunk of the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1294) * btree each search, there is obviously free
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1295) * inodes closer to the parent inode than we
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1296) * are now. restart the search again.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1297) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1298) pag->pagl_pagino = NULLAGINO;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1299) pag->pagl_leftrec = NULLAGINO;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1300) pag->pagl_rightrec = NULLAGINO;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1301) xfs_btree_del_cursor(tcur, XFS_BTREE_NOERROR);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1302) xfs_btree_del_cursor(cur, XFS_BTREE_NOERROR);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1303) goto restart_pagno;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1304) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1305) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1306)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1307) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1308) * In a different AG from the parent.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1309) * See if the most recently allocated block has any free.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1310) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1311) if (agi->agi_newino != cpu_to_be32(NULLAGINO)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1312) error = xfs_inobt_lookup(cur, be32_to_cpu(agi->agi_newino),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1313) XFS_LOOKUP_EQ, &i);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1314) if (error)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1315) goto error0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1316)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1317) if (i == 1) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1318) error = xfs_inobt_get_rec(cur, &rec, &j);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1319) if (error)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1320) goto error0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1321)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1322) if (j == 1 && rec.ir_freecount > 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1323) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1324) * The last chunk allocated in the group
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1325) * still has a free inode.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1326) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1327) goto alloc_inode;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1328) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1329) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1330) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1331)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1332) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1333) * None left in the last group, search the whole AG
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1334) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1335) error = xfs_inobt_lookup(cur, 0, XFS_LOOKUP_GE, &i);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1336) if (error)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1337) goto error0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1338) if (XFS_IS_CORRUPT(mp, i != 1)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1339) error = -EFSCORRUPTED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1340) goto error0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1341) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1342)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1343) for (;;) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1344) error = xfs_inobt_get_rec(cur, &rec, &i);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1345) if (error)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1346) goto error0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1347) if (XFS_IS_CORRUPT(mp, i != 1)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1348) error = -EFSCORRUPTED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1349) goto error0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1350) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1351) if (rec.ir_freecount > 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1352) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1353) error = xfs_btree_increment(cur, 0, &i);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1354) if (error)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1355) goto error0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1356) if (XFS_IS_CORRUPT(mp, i != 1)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1357) error = -EFSCORRUPTED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1358) goto error0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1359) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1360) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1361)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1362) alloc_inode:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1363) offset = xfs_inobt_first_free_inode(&rec);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1364) ASSERT(offset >= 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1365) ASSERT(offset < XFS_INODES_PER_CHUNK);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1366) ASSERT((XFS_AGINO_TO_OFFSET(mp, rec.ir_startino) %
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1367) XFS_INODES_PER_CHUNK) == 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1368) ino = XFS_AGINO_TO_INO(mp, agno, rec.ir_startino + offset);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1369) rec.ir_free &= ~XFS_INOBT_MASK(offset);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1370) rec.ir_freecount--;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1371) error = xfs_inobt_update(cur, &rec);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1372) if (error)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1373) goto error0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1374) be32_add_cpu(&agi->agi_freecount, -1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1375) xfs_ialloc_log_agi(tp, agbp, XFS_AGI_FREECOUNT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1376) pag->pagi_freecount--;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1377)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1378) error = xfs_check_agi_freecount(cur, agi);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1379) if (error)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1380) goto error0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1381)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1382) xfs_btree_del_cursor(cur, XFS_BTREE_NOERROR);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1383) xfs_trans_mod_sb(tp, XFS_TRANS_SB_IFREE, -1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1384) *inop = ino;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1385) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1386) error1:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1387) xfs_btree_del_cursor(tcur, XFS_BTREE_ERROR);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1388) error0:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1389) xfs_btree_del_cursor(cur, XFS_BTREE_ERROR);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1390) return error;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1391) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1392)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1393) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1394) * Use the free inode btree to allocate an inode based on distance from the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1395) * parent. Note that the provided cursor may be deleted and replaced.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1396) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1397) STATIC int
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1398) xfs_dialloc_ag_finobt_near(
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1399) xfs_agino_t pagino,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1400) struct xfs_btree_cur **ocur,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1401) struct xfs_inobt_rec_incore *rec)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1402) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1403) struct xfs_btree_cur *lcur = *ocur; /* left search cursor */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1404) struct xfs_btree_cur *rcur; /* right search cursor */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1405) struct xfs_inobt_rec_incore rrec;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1406) int error;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1407) int i, j;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1408)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1409) error = xfs_inobt_lookup(lcur, pagino, XFS_LOOKUP_LE, &i);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1410) if (error)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1411) return error;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1412)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1413) if (i == 1) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1414) error = xfs_inobt_get_rec(lcur, rec, &i);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1415) if (error)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1416) return error;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1417) if (XFS_IS_CORRUPT(lcur->bc_mp, i != 1))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1418) return -EFSCORRUPTED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1419)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1420) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1421) * See if we've landed in the parent inode record. The finobt
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1422) * only tracks chunks with at least one free inode, so record
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1423) * existence is enough.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1424) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1425) if (pagino >= rec->ir_startino &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1426) pagino < (rec->ir_startino + XFS_INODES_PER_CHUNK))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1427) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1428) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1429)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1430) error = xfs_btree_dup_cursor(lcur, &rcur);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1431) if (error)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1432) return error;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1433)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1434) error = xfs_inobt_lookup(rcur, pagino, XFS_LOOKUP_GE, &j);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1435) if (error)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1436) goto error_rcur;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1437) if (j == 1) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1438) error = xfs_inobt_get_rec(rcur, &rrec, &j);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1439) if (error)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1440) goto error_rcur;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1441) if (XFS_IS_CORRUPT(lcur->bc_mp, j != 1)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1442) error = -EFSCORRUPTED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1443) goto error_rcur;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1444) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1445) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1446)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1447) if (XFS_IS_CORRUPT(lcur->bc_mp, i != 1 && j != 1)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1448) error = -EFSCORRUPTED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1449) goto error_rcur;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1450) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1451) if (i == 1 && j == 1) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1452) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1453) * Both the left and right records are valid. Choose the closer
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1454) * inode chunk to the target.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1455) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1456) if ((pagino - rec->ir_startino + XFS_INODES_PER_CHUNK - 1) >
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1457) (rrec.ir_startino - pagino)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1458) *rec = rrec;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1459) xfs_btree_del_cursor(lcur, XFS_BTREE_NOERROR);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1460) *ocur = rcur;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1461) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1462) xfs_btree_del_cursor(rcur, XFS_BTREE_NOERROR);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1463) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1464) } else if (j == 1) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1465) /* only the right record is valid */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1466) *rec = rrec;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1467) xfs_btree_del_cursor(lcur, XFS_BTREE_NOERROR);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1468) *ocur = rcur;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1469) } else if (i == 1) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1470) /* only the left record is valid */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1471) xfs_btree_del_cursor(rcur, XFS_BTREE_NOERROR);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1472) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1473)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1474) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1475)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1476) error_rcur:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1477) xfs_btree_del_cursor(rcur, XFS_BTREE_ERROR);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1478) return error;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1479) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1480)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1481) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1482) * Use the free inode btree to find a free inode based on a newino hint. If
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1483) * the hint is NULL, find the first free inode in the AG.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1484) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1485) STATIC int
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1486) xfs_dialloc_ag_finobt_newino(
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1487) struct xfs_agi *agi,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1488) struct xfs_btree_cur *cur,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1489) struct xfs_inobt_rec_incore *rec)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1490) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1491) int error;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1492) int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1493)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1494) if (agi->agi_newino != cpu_to_be32(NULLAGINO)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1495) error = xfs_inobt_lookup(cur, be32_to_cpu(agi->agi_newino),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1496) XFS_LOOKUP_EQ, &i);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1497) if (error)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1498) return error;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1499) if (i == 1) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1500) error = xfs_inobt_get_rec(cur, rec, &i);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1501) if (error)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1502) return error;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1503) if (XFS_IS_CORRUPT(cur->bc_mp, i != 1))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1504) return -EFSCORRUPTED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1505) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1506) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1507) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1508)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1509) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1510) * Find the first inode available in the AG.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1511) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1512) error = xfs_inobt_lookup(cur, 0, XFS_LOOKUP_GE, &i);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1513) if (error)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1514) return error;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1515) if (XFS_IS_CORRUPT(cur->bc_mp, i != 1))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1516) return -EFSCORRUPTED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1517)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1518) error = xfs_inobt_get_rec(cur, rec, &i);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1519) if (error)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1520) return error;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1521) if (XFS_IS_CORRUPT(cur->bc_mp, i != 1))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1522) return -EFSCORRUPTED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1523)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1524) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1525) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1526)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1527) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1528) * Update the inobt based on a modification made to the finobt. Also ensure that
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1529) * the records from both trees are equivalent post-modification.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1530) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1531) STATIC int
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1532) xfs_dialloc_ag_update_inobt(
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1533) struct xfs_btree_cur *cur, /* inobt cursor */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1534) struct xfs_inobt_rec_incore *frec, /* finobt record */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1535) int offset) /* inode offset */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1536) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1537) struct xfs_inobt_rec_incore rec;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1538) int error;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1539) int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1540)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1541) error = xfs_inobt_lookup(cur, frec->ir_startino, XFS_LOOKUP_EQ, &i);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1542) if (error)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1543) return error;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1544) if (XFS_IS_CORRUPT(cur->bc_mp, i != 1))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1545) return -EFSCORRUPTED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1546)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1547) error = xfs_inobt_get_rec(cur, &rec, &i);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1548) if (error)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1549) return error;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1550) if (XFS_IS_CORRUPT(cur->bc_mp, i != 1))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1551) return -EFSCORRUPTED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1552) ASSERT((XFS_AGINO_TO_OFFSET(cur->bc_mp, rec.ir_startino) %
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1553) XFS_INODES_PER_CHUNK) == 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1554)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1555) rec.ir_free &= ~XFS_INOBT_MASK(offset);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1556) rec.ir_freecount--;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1557)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1558) if (XFS_IS_CORRUPT(cur->bc_mp,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1559) rec.ir_free != frec->ir_free ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1560) rec.ir_freecount != frec->ir_freecount))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1561) return -EFSCORRUPTED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1562)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1563) return xfs_inobt_update(cur, &rec);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1564) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1565)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1566) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1567) * Allocate an inode using the free inode btree, if available. Otherwise, fall
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1568) * back to the inobt search algorithm.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1569) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1570) * The caller selected an AG for us, and made sure that free inodes are
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1571) * available.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1572) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1573) STATIC int
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1574) xfs_dialloc_ag(
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1575) struct xfs_trans *tp,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1576) struct xfs_buf *agbp,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1577) xfs_ino_t parent,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1578) xfs_ino_t *inop)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1579) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1580) struct xfs_mount *mp = tp->t_mountp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1581) struct xfs_agi *agi = agbp->b_addr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1582) xfs_agnumber_t agno = be32_to_cpu(agi->agi_seqno);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1583) xfs_agnumber_t pagno = XFS_INO_TO_AGNO(mp, parent);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1584) xfs_agino_t pagino = XFS_INO_TO_AGINO(mp, parent);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1585) struct xfs_btree_cur *cur; /* finobt cursor */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1586) struct xfs_btree_cur *icur; /* inobt cursor */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1587) struct xfs_inobt_rec_incore rec;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1588) xfs_ino_t ino;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1589) int error;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1590) int offset;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1591) int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1592)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1593) if (!xfs_sb_version_hasfinobt(&mp->m_sb))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1594) return xfs_dialloc_ag_inobt(tp, agbp, parent, inop);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1595)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1596) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1597) * If pagino is 0 (this is the root inode allocation) use newino.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1598) * This must work because we've just allocated some.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1599) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1600) if (!pagino)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1601) pagino = be32_to_cpu(agi->agi_newino);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1602)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1603) cur = xfs_inobt_init_cursor(mp, tp, agbp, agno, XFS_BTNUM_FINO);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1604)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1605) error = xfs_check_agi_freecount(cur, agi);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1606) if (error)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1607) goto error_cur;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1608)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1609) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1610) * The search algorithm depends on whether we're in the same AG as the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1611) * parent. If so, find the closest available inode to the parent. If
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1612) * not, consider the agi hint or find the first free inode in the AG.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1613) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1614) if (agno == pagno)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1615) error = xfs_dialloc_ag_finobt_near(pagino, &cur, &rec);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1616) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1617) error = xfs_dialloc_ag_finobt_newino(agi, cur, &rec);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1618) if (error)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1619) goto error_cur;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1620)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1621) offset = xfs_inobt_first_free_inode(&rec);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1622) ASSERT(offset >= 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1623) ASSERT(offset < XFS_INODES_PER_CHUNK);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1624) ASSERT((XFS_AGINO_TO_OFFSET(mp, rec.ir_startino) %
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1625) XFS_INODES_PER_CHUNK) == 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1626) ino = XFS_AGINO_TO_INO(mp, agno, rec.ir_startino + offset);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1627)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1628) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1629) * Modify or remove the finobt record.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1630) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1631) rec.ir_free &= ~XFS_INOBT_MASK(offset);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1632) rec.ir_freecount--;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1633) if (rec.ir_freecount)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1634) error = xfs_inobt_update(cur, &rec);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1635) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1636) error = xfs_btree_delete(cur, &i);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1637) if (error)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1638) goto error_cur;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1639)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1640) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1641) * The finobt has now been updated appropriately. We haven't updated the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1642) * agi and superblock yet, so we can create an inobt cursor and validate
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1643) * the original freecount. If all is well, make the equivalent update to
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1644) * the inobt using the finobt record and offset information.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1645) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1646) icur = xfs_inobt_init_cursor(mp, tp, agbp, agno, XFS_BTNUM_INO);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1647)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1648) error = xfs_check_agi_freecount(icur, agi);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1649) if (error)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1650) goto error_icur;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1651)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1652) error = xfs_dialloc_ag_update_inobt(icur, &rec, offset);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1653) if (error)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1654) goto error_icur;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1655)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1656) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1657) * Both trees have now been updated. We must update the perag and
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1658) * superblock before we can check the freecount for each btree.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1659) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1660) be32_add_cpu(&agi->agi_freecount, -1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1661) xfs_ialloc_log_agi(tp, agbp, XFS_AGI_FREECOUNT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1662) agbp->b_pag->pagi_freecount--;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1663)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1664) xfs_trans_mod_sb(tp, XFS_TRANS_SB_IFREE, -1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1665)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1666) error = xfs_check_agi_freecount(icur, agi);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1667) if (error)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1668) goto error_icur;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1669) error = xfs_check_agi_freecount(cur, agi);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1670) if (error)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1671) goto error_icur;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1672)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1673) xfs_btree_del_cursor(icur, XFS_BTREE_NOERROR);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1674) xfs_btree_del_cursor(cur, XFS_BTREE_NOERROR);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1675) *inop = ino;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1676) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1677)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1678) error_icur:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1679) xfs_btree_del_cursor(icur, XFS_BTREE_ERROR);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1680) error_cur:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1681) xfs_btree_del_cursor(cur, XFS_BTREE_ERROR);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1682) return error;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1683) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1684)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1685) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1686) * Allocate an inode on disk.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1687) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1688) * Mode is used to tell whether the new inode will need space, and whether it
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1689) * is a directory.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1690) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1691) * This function is designed to be called twice if it has to do an allocation
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1692) * to make more free inodes. On the first call, *IO_agbp should be set to NULL.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1693) * If an inode is available without having to performn an allocation, an inode
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1694) * number is returned. In this case, *IO_agbp is set to NULL. If an allocation
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1695) * needs to be done, xfs_dialloc returns the current AGI buffer in *IO_agbp.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1696) * The caller should then commit the current transaction, allocate a
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1697) * new transaction, and call xfs_dialloc() again, passing in the previous value
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1698) * of *IO_agbp. IO_agbp should be held across the transactions. Since the AGI
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1699) * buffer is locked across the two calls, the second call is guaranteed to have
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1700) * a free inode available.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1701) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1702) * Once we successfully pick an inode its number is returned and the on-disk
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1703) * data structures are updated. The inode itself is not read in, since doing so
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1704) * would break ordering constraints with xfs_reclaim.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1705) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1706) int
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1707) xfs_dialloc(
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1708) struct xfs_trans *tp,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1709) xfs_ino_t parent,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1710) umode_t mode,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1711) struct xfs_buf **IO_agbp,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1712) xfs_ino_t *inop)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1713) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1714) struct xfs_mount *mp = tp->t_mountp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1715) struct xfs_buf *agbp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1716) xfs_agnumber_t agno;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1717) int error;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1718) int ialloced;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1719) int noroom = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1720) xfs_agnumber_t start_agno;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1721) struct xfs_perag *pag;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1722) struct xfs_ino_geometry *igeo = M_IGEO(mp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1723) int okalloc = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1724)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1725) if (*IO_agbp) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1726) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1727) * If the caller passes in a pointer to the AGI buffer,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1728) * continue where we left off before. In this case, we
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1729) * know that the allocation group has free inodes.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1730) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1731) agbp = *IO_agbp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1732) goto out_alloc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1733) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1734)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1735) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1736) * We do not have an agbp, so select an initial allocation
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1737) * group for inode allocation.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1738) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1739) start_agno = xfs_ialloc_ag_select(tp, parent, mode);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1740) if (start_agno == NULLAGNUMBER) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1741) *inop = NULLFSINO;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1742) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1743) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1744)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1745) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1746) * If we have already hit the ceiling of inode blocks then clear
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1747) * okalloc so we scan all available agi structures for a free
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1748) * inode.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1749) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1750) * Read rough value of mp->m_icount by percpu_counter_read_positive,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1751) * which will sacrifice the preciseness but improve the performance.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1752) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1753) if (igeo->maxicount &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1754) percpu_counter_read_positive(&mp->m_icount) + igeo->ialloc_inos
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1755) > igeo->maxicount) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1756) noroom = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1757) okalloc = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1758) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1759)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1760) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1761) * Loop until we find an allocation group that either has free inodes
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1762) * or in which we can allocate some inodes. Iterate through the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1763) * allocation groups upward, wrapping at the end.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1764) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1765) agno = start_agno;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1766) for (;;) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1767) pag = xfs_perag_get(mp, agno);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1768) if (!pag->pagi_inodeok) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1769) xfs_ialloc_next_ag(mp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1770) goto nextag;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1771) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1772)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1773) if (!pag->pagi_init) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1774) error = xfs_ialloc_pagi_init(mp, tp, agno);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1775) if (error)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1776) goto out_error;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1777) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1778)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1779) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1780) * Do a first racy fast path check if this AG is usable.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1781) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1782) if (!pag->pagi_freecount && !okalloc)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1783) goto nextag;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1784)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1785) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1786) * Then read in the AGI buffer and recheck with the AGI buffer
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1787) * lock held.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1788) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1789) error = xfs_ialloc_read_agi(mp, tp, agno, &agbp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1790) if (error)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1791) goto out_error;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1792)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1793) if (pag->pagi_freecount) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1794) xfs_perag_put(pag);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1795) goto out_alloc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1796) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1797)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1798) if (!okalloc)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1799) goto nextag_relse_buffer;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1800)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1801)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1802) error = xfs_ialloc_ag_alloc(tp, agbp, &ialloced);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1803) if (error) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1804) xfs_trans_brelse(tp, agbp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1805)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1806) if (error != -ENOSPC)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1807) goto out_error;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1808)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1809) xfs_perag_put(pag);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1810) *inop = NULLFSINO;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1811) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1812) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1813)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1814) if (ialloced) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1815) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1816) * We successfully allocated some inodes, return
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1817) * the current context to the caller so that it
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1818) * can commit the current transaction and call
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1819) * us again where we left off.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1820) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1821) ASSERT(pag->pagi_freecount > 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1822) xfs_perag_put(pag);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1823)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1824) *IO_agbp = agbp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1825) *inop = NULLFSINO;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1826) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1827) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1828)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1829) nextag_relse_buffer:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1830) xfs_trans_brelse(tp, agbp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1831) nextag:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1832) xfs_perag_put(pag);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1833) if (++agno == mp->m_sb.sb_agcount)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1834) agno = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1835) if (agno == start_agno) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1836) *inop = NULLFSINO;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1837) return noroom ? -ENOSPC : 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1838) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1839) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1840)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1841) out_alloc:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1842) *IO_agbp = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1843) return xfs_dialloc_ag(tp, agbp, parent, inop);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1844) out_error:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1845) xfs_perag_put(pag);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1846) return error;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1847) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1848)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1849) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1850) * Free the blocks of an inode chunk. We must consider that the inode chunk
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1851) * might be sparse and only free the regions that are allocated as part of the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1852) * chunk.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1853) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1854) STATIC void
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1855) xfs_difree_inode_chunk(
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1856) struct xfs_trans *tp,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1857) xfs_agnumber_t agno,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1858) struct xfs_inobt_rec_incore *rec)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1859) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1860) struct xfs_mount *mp = tp->t_mountp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1861) xfs_agblock_t sagbno = XFS_AGINO_TO_AGBNO(mp,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1862) rec->ir_startino);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1863) int startidx, endidx;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1864) int nextbit;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1865) xfs_agblock_t agbno;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1866) int contigblk;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1867) DECLARE_BITMAP(holemask, XFS_INOBT_HOLEMASK_BITS);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1868)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1869) if (!xfs_inobt_issparse(rec->ir_holemask)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1870) /* not sparse, calculate extent info directly */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1871) xfs_bmap_add_free(tp, XFS_AGB_TO_FSB(mp, agno, sagbno),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1872) M_IGEO(mp)->ialloc_blks,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1873) &XFS_RMAP_OINFO_INODES);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1874) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1875) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1876)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1877) /* holemask is only 16-bits (fits in an unsigned long) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1878) ASSERT(sizeof(rec->ir_holemask) <= sizeof(holemask[0]));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1879) holemask[0] = rec->ir_holemask;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1880)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1881) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1882) * Find contiguous ranges of zeroes (i.e., allocated regions) in the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1883) * holemask and convert the start/end index of each range to an extent.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1884) * We start with the start and end index both pointing at the first 0 in
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1885) * the mask.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1886) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1887) startidx = endidx = find_first_zero_bit(holemask,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1888) XFS_INOBT_HOLEMASK_BITS);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1889) nextbit = startidx + 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1890) while (startidx < XFS_INOBT_HOLEMASK_BITS) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1891) nextbit = find_next_zero_bit(holemask, XFS_INOBT_HOLEMASK_BITS,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1892) nextbit);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1893) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1894) * If the next zero bit is contiguous, update the end index of
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1895) * the current range and continue.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1896) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1897) if (nextbit != XFS_INOBT_HOLEMASK_BITS &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1898) nextbit == endidx + 1) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1899) endidx = nextbit;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1900) goto next;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1901) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1902)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1903) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1904) * nextbit is not contiguous with the current end index. Convert
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1905) * the current start/end to an extent and add it to the free
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1906) * list.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1907) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1908) agbno = sagbno + (startidx * XFS_INODES_PER_HOLEMASK_BIT) /
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1909) mp->m_sb.sb_inopblock;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1910) contigblk = ((endidx - startidx + 1) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1911) XFS_INODES_PER_HOLEMASK_BIT) /
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1912) mp->m_sb.sb_inopblock;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1913)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1914) ASSERT(agbno % mp->m_sb.sb_spino_align == 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1915) ASSERT(contigblk % mp->m_sb.sb_spino_align == 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1916) xfs_bmap_add_free(tp, XFS_AGB_TO_FSB(mp, agno, agbno),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1917) contigblk, &XFS_RMAP_OINFO_INODES);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1918)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1919) /* reset range to current bit and carry on... */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1920) startidx = endidx = nextbit;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1921)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1922) next:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1923) nextbit++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1924) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1925) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1926)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1927) STATIC int
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1928) xfs_difree_inobt(
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1929) struct xfs_mount *mp,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1930) struct xfs_trans *tp,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1931) struct xfs_buf *agbp,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1932) xfs_agino_t agino,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1933) struct xfs_icluster *xic,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1934) struct xfs_inobt_rec_incore *orec)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1935) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1936) struct xfs_agi *agi = agbp->b_addr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1937) xfs_agnumber_t agno = be32_to_cpu(agi->agi_seqno);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1938) struct xfs_btree_cur *cur;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1939) struct xfs_inobt_rec_incore rec;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1940) int ilen;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1941) int error;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1942) int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1943) int off;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1944)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1945) ASSERT(agi->agi_magicnum == cpu_to_be32(XFS_AGI_MAGIC));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1946) ASSERT(XFS_AGINO_TO_AGBNO(mp, agino) < be32_to_cpu(agi->agi_length));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1947)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1948) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1949) * Initialize the cursor.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1950) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1951) cur = xfs_inobt_init_cursor(mp, tp, agbp, agno, XFS_BTNUM_INO);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1952)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1953) error = xfs_check_agi_freecount(cur, agi);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1954) if (error)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1955) goto error0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1956)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1957) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1958) * Look for the entry describing this inode.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1959) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1960) if ((error = xfs_inobt_lookup(cur, agino, XFS_LOOKUP_LE, &i))) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1961) xfs_warn(mp, "%s: xfs_inobt_lookup() returned error %d.",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1962) __func__, error);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1963) goto error0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1964) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1965) if (XFS_IS_CORRUPT(mp, i != 1)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1966) error = -EFSCORRUPTED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1967) goto error0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1968) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1969) error = xfs_inobt_get_rec(cur, &rec, &i);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1970) if (error) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1971) xfs_warn(mp, "%s: xfs_inobt_get_rec() returned error %d.",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1972) __func__, error);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1973) goto error0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1974) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1975) if (XFS_IS_CORRUPT(mp, i != 1)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1976) error = -EFSCORRUPTED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1977) goto error0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1978) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1979) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1980) * Get the offset in the inode chunk.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1981) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1982) off = agino - rec.ir_startino;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1983) ASSERT(off >= 0 && off < XFS_INODES_PER_CHUNK);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1984) ASSERT(!(rec.ir_free & XFS_INOBT_MASK(off)));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1985) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1986) * Mark the inode free & increment the count.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1987) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1988) rec.ir_free |= XFS_INOBT_MASK(off);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1989) rec.ir_freecount++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1990)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1991) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1992) * When an inode chunk is free, it becomes eligible for removal. Don't
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1993) * remove the chunk if the block size is large enough for multiple inode
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1994) * chunks (that might not be free).
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1995) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1996) if (!(mp->m_flags & XFS_MOUNT_IKEEP) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1997) rec.ir_free == XFS_INOBT_ALL_FREE &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1998) mp->m_sb.sb_inopblock <= XFS_INODES_PER_CHUNK) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1999) struct xfs_perag *pag = agbp->b_pag;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2000)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2001) xic->deleted = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2002) xic->first_ino = XFS_AGINO_TO_INO(mp, agno, rec.ir_startino);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2003) xic->alloc = xfs_inobt_irec_to_allocmask(&rec);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2004)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2005) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2006) * Remove the inode cluster from the AGI B+Tree, adjust the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2007) * AGI and Superblock inode counts, and mark the disk space
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2008) * to be freed when the transaction is committed.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2009) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2010) ilen = rec.ir_freecount;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2011) be32_add_cpu(&agi->agi_count, -ilen);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2012) be32_add_cpu(&agi->agi_freecount, -(ilen - 1));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2013) xfs_ialloc_log_agi(tp, agbp, XFS_AGI_COUNT | XFS_AGI_FREECOUNT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2014) pag->pagi_freecount -= ilen - 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2015) pag->pagi_count -= ilen;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2016) xfs_trans_mod_sb(tp, XFS_TRANS_SB_ICOUNT, -ilen);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2017) xfs_trans_mod_sb(tp, XFS_TRANS_SB_IFREE, -(ilen - 1));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2018)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2019) if ((error = xfs_btree_delete(cur, &i))) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2020) xfs_warn(mp, "%s: xfs_btree_delete returned error %d.",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2021) __func__, error);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2022) goto error0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2023) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2024)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2025) xfs_difree_inode_chunk(tp, agno, &rec);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2026) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2027) xic->deleted = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2028)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2029) error = xfs_inobt_update(cur, &rec);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2030) if (error) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2031) xfs_warn(mp, "%s: xfs_inobt_update returned error %d.",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2032) __func__, error);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2033) goto error0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2034) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2035)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2036) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2037) * Change the inode free counts and log the ag/sb changes.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2038) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2039) be32_add_cpu(&agi->agi_freecount, 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2040) xfs_ialloc_log_agi(tp, agbp, XFS_AGI_FREECOUNT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2041) agbp->b_pag->pagi_freecount++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2042) xfs_trans_mod_sb(tp, XFS_TRANS_SB_IFREE, 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2043) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2044)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2045) error = xfs_check_agi_freecount(cur, agi);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2046) if (error)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2047) goto error0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2048)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2049) *orec = rec;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2050) xfs_btree_del_cursor(cur, XFS_BTREE_NOERROR);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2051) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2052)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2053) error0:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2054) xfs_btree_del_cursor(cur, XFS_BTREE_ERROR);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2055) return error;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2056) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2057)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2058) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2059) * Free an inode in the free inode btree.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2060) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2061) STATIC int
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2062) xfs_difree_finobt(
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2063) struct xfs_mount *mp,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2064) struct xfs_trans *tp,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2065) struct xfs_buf *agbp,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2066) xfs_agino_t agino,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2067) struct xfs_inobt_rec_incore *ibtrec) /* inobt record */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2068) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2069) struct xfs_agi *agi = agbp->b_addr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2070) xfs_agnumber_t agno = be32_to_cpu(agi->agi_seqno);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2071) struct xfs_btree_cur *cur;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2072) struct xfs_inobt_rec_incore rec;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2073) int offset = agino - ibtrec->ir_startino;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2074) int error;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2075) int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2076)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2077) cur = xfs_inobt_init_cursor(mp, tp, agbp, agno, XFS_BTNUM_FINO);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2078)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2079) error = xfs_inobt_lookup(cur, ibtrec->ir_startino, XFS_LOOKUP_EQ, &i);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2080) if (error)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2081) goto error;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2082) if (i == 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2083) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2084) * If the record does not exist in the finobt, we must have just
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2085) * freed an inode in a previously fully allocated chunk. If not,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2086) * something is out of sync.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2087) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2088) if (XFS_IS_CORRUPT(mp, ibtrec->ir_freecount != 1)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2089) error = -EFSCORRUPTED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2090) goto error;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2091) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2092)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2093) error = xfs_inobt_insert_rec(cur, ibtrec->ir_holemask,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2094) ibtrec->ir_count,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2095) ibtrec->ir_freecount,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2096) ibtrec->ir_free, &i);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2097) if (error)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2098) goto error;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2099) ASSERT(i == 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2100)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2101) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2102) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2103)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2104) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2105) * Read and update the existing record. We could just copy the ibtrec
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2106) * across here, but that would defeat the purpose of having redundant
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2107) * metadata. By making the modifications independently, we can catch
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2108) * corruptions that we wouldn't see if we just copied from one record
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2109) * to another.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2110) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2111) error = xfs_inobt_get_rec(cur, &rec, &i);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2112) if (error)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2113) goto error;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2114) if (XFS_IS_CORRUPT(mp, i != 1)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2115) error = -EFSCORRUPTED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2116) goto error;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2117) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2118)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2119) rec.ir_free |= XFS_INOBT_MASK(offset);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2120) rec.ir_freecount++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2121)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2122) if (XFS_IS_CORRUPT(mp,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2123) rec.ir_free != ibtrec->ir_free ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2124) rec.ir_freecount != ibtrec->ir_freecount)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2125) error = -EFSCORRUPTED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2126) goto error;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2127) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2128)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2129) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2130) * The content of inobt records should always match between the inobt
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2131) * and finobt. The lifecycle of records in the finobt is different from
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2132) * the inobt in that the finobt only tracks records with at least one
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2133) * free inode. Hence, if all of the inodes are free and we aren't
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2134) * keeping inode chunks permanently on disk, remove the record.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2135) * Otherwise, update the record with the new information.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2136) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2137) * Note that we currently can't free chunks when the block size is large
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2138) * enough for multiple chunks. Leave the finobt record to remain in sync
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2139) * with the inobt.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2140) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2141) if (rec.ir_free == XFS_INOBT_ALL_FREE &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2142) mp->m_sb.sb_inopblock <= XFS_INODES_PER_CHUNK &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2143) !(mp->m_flags & XFS_MOUNT_IKEEP)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2144) error = xfs_btree_delete(cur, &i);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2145) if (error)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2146) goto error;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2147) ASSERT(i == 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2148) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2149) error = xfs_inobt_update(cur, &rec);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2150) if (error)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2151) goto error;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2152) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2153)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2154) out:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2155) error = xfs_check_agi_freecount(cur, agi);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2156) if (error)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2157) goto error;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2158)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2159) xfs_btree_del_cursor(cur, XFS_BTREE_NOERROR);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2160) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2161)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2162) error:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2163) xfs_btree_del_cursor(cur, XFS_BTREE_ERROR);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2164) return error;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2165) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2166)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2167) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2168) * Free disk inode. Carefully avoids touching the incore inode, all
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2169) * manipulations incore are the caller's responsibility.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2170) * The on-disk inode is not changed by this operation, only the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2171) * btree (free inode mask) is changed.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2172) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2173) int
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2174) xfs_difree(
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2175) struct xfs_trans *tp, /* transaction pointer */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2176) xfs_ino_t inode, /* inode to be freed */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2177) struct xfs_icluster *xic) /* cluster info if deleted */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2178) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2179) /* REFERENCED */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2180) xfs_agblock_t agbno; /* block number containing inode */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2181) struct xfs_buf *agbp; /* buffer for allocation group header */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2182) xfs_agino_t agino; /* allocation group inode number */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2183) xfs_agnumber_t agno; /* allocation group number */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2184) int error; /* error return value */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2185) struct xfs_mount *mp; /* mount structure for filesystem */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2186) struct xfs_inobt_rec_incore rec;/* btree record */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2187)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2188) mp = tp->t_mountp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2189)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2190) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2191) * Break up inode number into its components.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2192) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2193) agno = XFS_INO_TO_AGNO(mp, inode);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2194) if (agno >= mp->m_sb.sb_agcount) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2195) xfs_warn(mp, "%s: agno >= mp->m_sb.sb_agcount (%d >= %d).",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2196) __func__, agno, mp->m_sb.sb_agcount);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2197) ASSERT(0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2198) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2199) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2200) agino = XFS_INO_TO_AGINO(mp, inode);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2201) if (inode != XFS_AGINO_TO_INO(mp, agno, agino)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2202) xfs_warn(mp, "%s: inode != XFS_AGINO_TO_INO() (%llu != %llu).",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2203) __func__, (unsigned long long)inode,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2204) (unsigned long long)XFS_AGINO_TO_INO(mp, agno, agino));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2205) ASSERT(0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2206) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2207) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2208) agbno = XFS_AGINO_TO_AGBNO(mp, agino);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2209) if (agbno >= mp->m_sb.sb_agblocks) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2210) xfs_warn(mp, "%s: agbno >= mp->m_sb.sb_agblocks (%d >= %d).",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2211) __func__, agbno, mp->m_sb.sb_agblocks);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2212) ASSERT(0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2213) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2214) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2215) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2216) * Get the allocation group header.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2217) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2218) error = xfs_ialloc_read_agi(mp, tp, agno, &agbp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2219) if (error) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2220) xfs_warn(mp, "%s: xfs_ialloc_read_agi() returned error %d.",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2221) __func__, error);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2222) return error;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2223) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2224)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2225) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2226) * Fix up the inode allocation btree.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2227) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2228) error = xfs_difree_inobt(mp, tp, agbp, agino, xic, &rec);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2229) if (error)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2230) goto error0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2231)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2232) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2233) * Fix up the free inode btree.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2234) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2235) if (xfs_sb_version_hasfinobt(&mp->m_sb)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2236) error = xfs_difree_finobt(mp, tp, agbp, agino, &rec);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2237) if (error)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2238) goto error0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2239) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2240)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2241) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2242)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2243) error0:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2244) return error;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2245) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2246)
/*
 * Look up the inode btree record covering @agino in order to discover the
 * geometry of its inode chunk: the AG block the chunk starts at
 * (*chunk_agbno) and the distance from the chunk start to the inode's
 * block (*offset_agbno).  Also validates that @agino actually lies inside
 * an existing chunk, and — for XFS_IGET_UNTRUSTED lookups — that the
 * inode is currently allocated.  Returns 0 or a negative errno.
 */
STATIC int
xfs_imap_lookup(
	struct xfs_mount	*mp,
	struct xfs_trans	*tp,
	xfs_agnumber_t		agno,		/* AG holding the inode */
	xfs_agino_t		agino,		/* inode number within the AG */
	xfs_agblock_t		agbno,		/* AG block holding the inode */
	xfs_agblock_t		*chunk_agbno,	/* out: first block of chunk */
	xfs_agblock_t		*offset_agbno,	/* out: agbno - *chunk_agbno */
	int			flags)		/* XFS_IGET_* lookup flags */
{
	struct xfs_inobt_rec_incore rec;
	struct xfs_btree_cur	*cur;
	struct xfs_buf		*agbp;
	int			error;
	int			i;		/* lookup/get_rec stat result */

	error = xfs_ialloc_read_agi(mp, tp, agno, &agbp);
	if (error) {
		xfs_alert(mp,
			"%s: xfs_ialloc_read_agi() returned error %d, agno %d",
			__func__, error, agno);
		return error;
	}

	/*
	 * Lookup the inode record for the given agino. If the record cannot be
	 * found, then it's an invalid inode number and we should abort. Once
	 * we have a record, we need to ensure it contains the inode number
	 * we are looking up.
	 */
	cur = xfs_inobt_init_cursor(mp, tp, agbp, agno, XFS_BTNUM_INO);
	error = xfs_inobt_lookup(cur, agino, XFS_LOOKUP_LE, &i);
	if (!error) {
		if (i)
			error = xfs_inobt_get_rec(cur, &rec, &i);
		/* LE lookup found nothing at or below agino: invalid inode */
		if (!error && i == 0)
			error = -EINVAL;
	}

	/* release the AGI buffer and cursor whether or not the lookup worked */
	xfs_trans_brelse(tp, agbp);
	xfs_btree_del_cursor(cur, error);
	if (error)
		return error;

	/* check that the returned record contains the required inode */
	if (rec.ir_startino > agino ||
	    rec.ir_startino + M_IGEO(mp)->ialloc_inos <= agino)
		return -EINVAL;

	/* for untrusted inodes check it is allocated first */
	if ((flags & XFS_IGET_UNTRUSTED) &&
	    (rec.ir_free & XFS_INOBT_MASK(agino - rec.ir_startino)))
		return -EINVAL;

	*chunk_agbno = XFS_AGINO_TO_AGBNO(mp, rec.ir_startino);
	*offset_agbno = agbno - *chunk_agbno;
	return 0;
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2306)
/*
 * Return the location of the inode in imap, for mapping it into a buffer.
 *
 * Translates an inode number into the daddr/length of the cluster buffer
 * that holds it (im_blkno/im_len) and the inode's byte offset within that
 * buffer (im_boffset).  Returns 0 on success or -EINVAL if the inode
 * number, or the mapping derived from it, is out of bounds.
 */
int
xfs_imap(
	xfs_mount_t	 *mp,	/* file system mount structure */
	xfs_trans_t	 *tp,	/* transaction pointer */
	xfs_ino_t	ino,	/* inode to locate */
	struct xfs_imap	*imap,	/* location map structure */
	uint		flags)	/* flags for inode btree lookup */
{
	xfs_agblock_t	agbno;	/* block number of inode in the alloc group */
	xfs_agino_t	agino;	/* inode number within alloc group */
	xfs_agnumber_t	agno;	/* allocation group number */
	xfs_agblock_t	chunk_agbno;	/* first block in inode chunk */
	xfs_agblock_t	cluster_agbno;	/* first block in inode cluster */
	int		error;	/* error code */
	int		offset;	/* index of inode in its buffer */
	xfs_agblock_t	offset_agbno;	/* blks from chunk start to inode */

	ASSERT(ino != NULLFSINO);

	/*
	 * Split up the inode number into its parts and bounds-check each
	 * against the superblock geometry.
	 */
	agno = XFS_INO_TO_AGNO(mp, ino);
	agino = XFS_INO_TO_AGINO(mp, ino);
	agbno = XFS_AGINO_TO_AGBNO(mp, agino);
	if (agno >= mp->m_sb.sb_agcount || agbno >= mp->m_sb.sb_agblocks ||
	    ino != XFS_AGINO_TO_INO(mp, agno, agino)) {
#ifdef DEBUG
		/*
		 * Don't output diagnostic information for untrusted inodes
		 * as they can be invalid without implying corruption.
		 */
		if (flags & XFS_IGET_UNTRUSTED)
			return -EINVAL;
		if (agno >= mp->m_sb.sb_agcount) {
			xfs_alert(mp,
				"%s: agno (%d) >= mp->m_sb.sb_agcount (%d)",
				__func__, agno, mp->m_sb.sb_agcount);
		}
		if (agbno >= mp->m_sb.sb_agblocks) {
			xfs_alert(mp,
		"%s: agbno (0x%llx) >= mp->m_sb.sb_agblocks (0x%lx)",
				__func__, (unsigned long long)agbno,
				(unsigned long)mp->m_sb.sb_agblocks);
		}
		if (ino != XFS_AGINO_TO_INO(mp, agno, agino)) {
			xfs_alert(mp,
		"%s: ino (0x%llx) != XFS_AGINO_TO_INO() (0x%llx)",
				__func__, ino,
				XFS_AGINO_TO_INO(mp, agno, agino));
		}
		xfs_stack_trace();
#endif /* DEBUG */
		return -EINVAL;
	}

	/*
	 * For bulkstat and handle lookups, we have an untrusted inode number
	 * that we have to verify is valid. We cannot do this just by reading
	 * the inode buffer as it may have been unlinked and removed leaving
	 * inodes in stale state on disk. Hence we have to do a btree lookup
	 * in all cases where an untrusted inode number is passed.
	 */
	if (flags & XFS_IGET_UNTRUSTED) {
		error = xfs_imap_lookup(mp, tp, agno, agino, agbno,
					&chunk_agbno, &offset_agbno, flags);
		if (error)
			return error;
		goto out_map;
	}

	/*
	 * If the inode cluster size is the same as the blocksize or
	 * smaller we get to the buffer by simple arithmetic.
	 */
	if (M_IGEO(mp)->blocks_per_cluster == 1) {
		offset = XFS_INO_TO_OFFSET(mp, ino);
		ASSERT(offset < mp->m_sb.sb_inopblock);

		imap->im_blkno = XFS_AGB_TO_DADDR(mp, agno, agbno);
		imap->im_len = XFS_FSB_TO_BB(mp, 1);
		imap->im_boffset = (unsigned short)(offset <<
							mp->m_sb.sb_inodelog);
		return 0;
	}

	/*
	 * If the inode chunks are aligned then use simple maths to
	 * find the location. Otherwise we have to do a btree
	 * lookup to find the location.
	 */
	if (M_IGEO(mp)->inoalign_mask) {
		offset_agbno = agbno & M_IGEO(mp)->inoalign_mask;
		chunk_agbno = agbno - offset_agbno;
	} else {
		error = xfs_imap_lookup(mp, tp, agno, agino, agbno,
					&chunk_agbno, &offset_agbno, flags);
		if (error)
			return error;
	}

out_map:
	/* round the chunk-relative offset down to a cluster boundary */
	ASSERT(agbno >= chunk_agbno);
	cluster_agbno = chunk_agbno +
		((offset_agbno / M_IGEO(mp)->blocks_per_cluster) *
		 M_IGEO(mp)->blocks_per_cluster);
	offset = ((agbno - cluster_agbno) * mp->m_sb.sb_inopblock) +
		XFS_INO_TO_OFFSET(mp, ino);

	imap->im_blkno = XFS_AGB_TO_DADDR(mp, agno, cluster_agbno);
	imap->im_len = XFS_FSB_TO_BB(mp, M_IGEO(mp)->blocks_per_cluster);
	imap->im_boffset = (unsigned short)(offset << mp->m_sb.sb_inodelog);

	/*
	 * If the inode number maps to a block outside the bounds
	 * of the file system then return NULL rather than calling
	 * read_buf and panicking when we get an error from the
	 * driver.
	 */
	if ((imap->im_blkno + imap->im_len) >
	    XFS_FSB_TO_BB(mp, mp->m_sb.sb_dblocks)) {
		xfs_alert(mp,
	"%s: (im_blkno (0x%llx) + im_len (0x%llx)) > sb_dblocks (0x%llx)",
			__func__, (unsigned long long) imap->im_blkno,
			(unsigned long long) imap->im_len,
			XFS_FSB_TO_BB(mp, mp->m_sb.sb_dblocks));
		return -EINVAL;
	}
	return 0;
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2440)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2441) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2442) * Log specified fields for the ag hdr (inode section). The growth of the agi
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2443) * structure over time requires that we interpret the buffer as two logical
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2444) * regions delineated by the end of the unlinked list. This is due to the size
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2445) * of the hash table and its location in the middle of the agi.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2446) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2447) * For example, a request to log a field before agi_unlinked and a field after
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2448) * agi_unlinked could cause us to log the entire hash table and use an excessive
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2449) * amount of log space. To avoid this behavior, log the region up through
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2450) * agi_unlinked in one call and the region after agi_unlinked through the end of
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2451) * the structure in another.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2452) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2453) void
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2454) xfs_ialloc_log_agi(
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2455) xfs_trans_t *tp, /* transaction pointer */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2456) xfs_buf_t *bp, /* allocation group header buffer */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2457) int fields) /* bitmask of fields to log */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2458) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2459) int first; /* first byte number */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2460) int last; /* last byte number */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2461) static const short offsets[] = { /* field starting offsets */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2462) /* keep in sync with bit definitions */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2463) offsetof(xfs_agi_t, agi_magicnum),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2464) offsetof(xfs_agi_t, agi_versionnum),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2465) offsetof(xfs_agi_t, agi_seqno),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2466) offsetof(xfs_agi_t, agi_length),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2467) offsetof(xfs_agi_t, agi_count),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2468) offsetof(xfs_agi_t, agi_root),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2469) offsetof(xfs_agi_t, agi_level),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2470) offsetof(xfs_agi_t, agi_freecount),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2471) offsetof(xfs_agi_t, agi_newino),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2472) offsetof(xfs_agi_t, agi_dirino),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2473) offsetof(xfs_agi_t, agi_unlinked),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2474) offsetof(xfs_agi_t, agi_free_root),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2475) offsetof(xfs_agi_t, agi_free_level),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2476) offsetof(xfs_agi_t, agi_iblocks),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2477) sizeof(xfs_agi_t)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2478) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2479) #ifdef DEBUG
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2480) struct xfs_agi *agi = bp->b_addr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2481)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2482) ASSERT(agi->agi_magicnum == cpu_to_be32(XFS_AGI_MAGIC));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2483) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2484)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2485) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2486) * Compute byte offsets for the first and last fields in the first
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2487) * region and log the agi buffer. This only logs up through
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2488) * agi_unlinked.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2489) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2490) if (fields & XFS_AGI_ALL_BITS_R1) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2491) xfs_btree_offsets(fields, offsets, XFS_AGI_NUM_BITS_R1,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2492) &first, &last);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2493) xfs_trans_log_buf(tp, bp, first, last);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2494) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2495)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2496) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2497) * Mask off the bits in the first region and calculate the first and
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2498) * last field offsets for any bits in the second region.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2499) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2500) fields &= ~XFS_AGI_ALL_BITS_R1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2501) if (fields) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2502) xfs_btree_offsets(fields, offsets, XFS_AGI_NUM_BITS_R2,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2503) &first, &last);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2504) xfs_trans_log_buf(tp, bp, first, last);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2505) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2506) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2507)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2508) static xfs_failaddr_t
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2509) xfs_agi_verify(
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2510) struct xfs_buf *bp)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2511) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2512) struct xfs_mount *mp = bp->b_mount;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2513) struct xfs_agi *agi = bp->b_addr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2514) int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2515)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2516) if (xfs_sb_version_hascrc(&mp->m_sb)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2517) if (!uuid_equal(&agi->agi_uuid, &mp->m_sb.sb_meta_uuid))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2518) return __this_address;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2519) if (!xfs_log_check_lsn(mp, be64_to_cpu(agi->agi_lsn)))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2520) return __this_address;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2521) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2522)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2523) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2524) * Validate the magic number of the agi block.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2525) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2526) if (!xfs_verify_magic(bp, agi->agi_magicnum))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2527) return __this_address;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2528) if (!XFS_AGI_GOOD_VERSION(be32_to_cpu(agi->agi_versionnum)))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2529) return __this_address;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2530)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2531) if (be32_to_cpu(agi->agi_level) < 1 ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2532) be32_to_cpu(agi->agi_level) > XFS_BTREE_MAXLEVELS)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2533) return __this_address;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2534)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2535) if (xfs_sb_version_hasfinobt(&mp->m_sb) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2536) (be32_to_cpu(agi->agi_free_level) < 1 ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2537) be32_to_cpu(agi->agi_free_level) > XFS_BTREE_MAXLEVELS))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2538) return __this_address;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2539)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2540) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2541) * during growfs operations, the perag is not fully initialised,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2542) * so we can't use it for any useful checking. growfs ensures we can't
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2543) * use it by using uncached buffers that don't have the perag attached
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2544) * so we can detect and avoid this problem.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2545) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2546) if (bp->b_pag && be32_to_cpu(agi->agi_seqno) != bp->b_pag->pag_agno)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2547) return __this_address;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2548)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2549) for (i = 0; i < XFS_AGI_UNLINKED_BUCKETS; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2550) if (agi->agi_unlinked[i] == cpu_to_be32(NULLAGINO))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2551) continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2552) if (!xfs_verify_ino(mp, be32_to_cpu(agi->agi_unlinked[i])))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2553) return __this_address;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2554) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2555)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2556) return NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2557) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2558)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2559) static void
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2560) xfs_agi_read_verify(
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2561) struct xfs_buf *bp)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2562) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2563) struct xfs_mount *mp = bp->b_mount;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2564) xfs_failaddr_t fa;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2565)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2566) if (xfs_sb_version_hascrc(&mp->m_sb) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2567) !xfs_buf_verify_cksum(bp, XFS_AGI_CRC_OFF))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2568) xfs_verifier_error(bp, -EFSBADCRC, __this_address);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2569) else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2570) fa = xfs_agi_verify(bp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2571) if (XFS_TEST_ERROR(fa, mp, XFS_ERRTAG_IALLOC_READ_AGI))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2572) xfs_verifier_error(bp, -EFSCORRUPTED, fa);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2573) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2574) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2575)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2576) static void
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2577) xfs_agi_write_verify(
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2578) struct xfs_buf *bp)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2579) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2580) struct xfs_mount *mp = bp->b_mount;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2581) struct xfs_buf_log_item *bip = bp->b_log_item;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2582) struct xfs_agi *agi = bp->b_addr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2583) xfs_failaddr_t fa;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2584)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2585) fa = xfs_agi_verify(bp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2586) if (fa) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2587) xfs_verifier_error(bp, -EFSCORRUPTED, fa);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2588) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2589) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2590)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2591) if (!xfs_sb_version_hascrc(&mp->m_sb))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2592) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2593)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2594) if (bip)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2595) agi->agi_lsn = cpu_to_be64(bip->bli_item.li_lsn);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2596) xfs_buf_update_cksum(bp, XFS_AGI_CRC_OFF);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2597) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2598)
/*
 * Buffer ops for AGI headers.  Both magic slots hold XFS_AGI_MAGIC since
 * the magic number did not change between the v4 and v5 disk formats.
 */
const struct xfs_buf_ops xfs_agi_buf_ops = {
	.name = "xfs_agi",
	.magic = { cpu_to_be32(XFS_AGI_MAGIC), cpu_to_be32(XFS_AGI_MAGIC) },
	.verify_read = xfs_agi_read_verify,
	.verify_write = xfs_agi_write_verify,
	.verify_struct = xfs_agi_verify,
};
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2606)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2607) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2608) * Read in the allocation group header (inode allocation section)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2609) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2610) int
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2611) xfs_read_agi(
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2612) struct xfs_mount *mp, /* file system mount structure */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2613) struct xfs_trans *tp, /* transaction pointer */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2614) xfs_agnumber_t agno, /* allocation group number */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2615) struct xfs_buf **bpp) /* allocation group hdr buf */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2616) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2617) int error;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2618)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2619) trace_xfs_read_agi(mp, agno);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2620)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2621) ASSERT(agno != NULLAGNUMBER);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2622) error = xfs_trans_read_buf(mp, tp, mp->m_ddev_targp,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2623) XFS_AG_DADDR(mp, agno, XFS_AGI_DADDR(mp)),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2624) XFS_FSS_TO_BB(mp, 1), 0, bpp, &xfs_agi_buf_ops);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2625) if (error)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2626) return error;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2627) if (tp)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2628) xfs_trans_buf_set_type(tp, *bpp, XFS_BLFT_AGI_BUF);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2629)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2630) xfs_buf_set_ref(*bpp, XFS_AGI_REF);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2631) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2632) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2633)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2634) int
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2635) xfs_ialloc_read_agi(
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2636) struct xfs_mount *mp, /* file system mount structure */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2637) struct xfs_trans *tp, /* transaction pointer */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2638) xfs_agnumber_t agno, /* allocation group number */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2639) struct xfs_buf **bpp) /* allocation group hdr buf */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2640) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2641) struct xfs_agi *agi; /* allocation group header */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2642) struct xfs_perag *pag; /* per allocation group data */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2643) int error;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2644)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2645) trace_xfs_ialloc_read_agi(mp, agno);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2646)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2647) error = xfs_read_agi(mp, tp, agno, bpp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2648) if (error)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2649) return error;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2650)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2651) agi = (*bpp)->b_addr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2652) pag = (*bpp)->b_pag;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2653) if (!pag->pagi_init) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2654) pag->pagi_freecount = be32_to_cpu(agi->agi_freecount);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2655) pag->pagi_count = be32_to_cpu(agi->agi_count);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2656) pag->pagi_init = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2657) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2658)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2659) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2660) * It's possible for these to be out of sync if
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2661) * we are in the middle of a forced shutdown.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2662) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2663) ASSERT(pag->pagi_freecount == be32_to_cpu(agi->agi_freecount) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2664) XFS_FORCED_SHUTDOWN(mp));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2665) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2666) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2667)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2668) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2669) * Read in the agi to initialise the per-ag data in the mount structure
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2670) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2671) int
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2672) xfs_ialloc_pagi_init(
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2673) xfs_mount_t *mp, /* file system mount structure */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2674) xfs_trans_t *tp, /* transaction pointer */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2675) xfs_agnumber_t agno) /* allocation group number */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2676) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2677) xfs_buf_t *bp = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2678) int error;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2679)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2680) error = xfs_ialloc_read_agi(mp, tp, agno, &bp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2681) if (error)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2682) return error;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2683) if (bp)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2684) xfs_trans_brelse(tp, bp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2685) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2686) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2687)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2688) /* Is there an inode record covering a given range of inode numbers? */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2689) int
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2690) xfs_ialloc_has_inode_record(
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2691) struct xfs_btree_cur *cur,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2692) xfs_agino_t low,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2693) xfs_agino_t high,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2694) bool *exists)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2695) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2696) struct xfs_inobt_rec_incore irec;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2697) xfs_agino_t agino;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2698) uint16_t holemask;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2699) int has_record;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2700) int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2701) int error;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2702)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2703) *exists = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2704) error = xfs_inobt_lookup(cur, low, XFS_LOOKUP_LE, &has_record);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2705) while (error == 0 && has_record) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2706) error = xfs_inobt_get_rec(cur, &irec, &has_record);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2707) if (error || irec.ir_startino > high)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2708) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2709)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2710) agino = irec.ir_startino;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2711) holemask = irec.ir_holemask;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2712) for (i = 0; i < XFS_INOBT_HOLEMASK_BITS; holemask >>= 1,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2713) i++, agino += XFS_INODES_PER_HOLEMASK_BIT) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2714) if (holemask & 1)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2715) continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2716) if (agino + XFS_INODES_PER_HOLEMASK_BIT > low &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2717) agino <= high) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2718) *exists = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2719) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2720) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2721) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2722)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2723) error = xfs_btree_increment(cur, 0, &has_record);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2724) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2725) return error;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2726) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2727)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2728) /* Is there an inode record covering a given extent? */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2729) int
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2730) xfs_ialloc_has_inodes_at_extent(
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2731) struct xfs_btree_cur *cur,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2732) xfs_agblock_t bno,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2733) xfs_extlen_t len,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2734) bool *exists)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2735) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2736) xfs_agino_t low;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2737) xfs_agino_t high;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2738)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2739) low = XFS_AGB_TO_AGINO(cur->bc_mp, bno);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2740) high = XFS_AGB_TO_AGINO(cur->bc_mp, bno + len) - 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2741)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2742) return xfs_ialloc_has_inode_record(cur, low, high, exists);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2743) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2744)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2745) struct xfs_ialloc_count_inodes {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2746) xfs_agino_t count;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2747) xfs_agino_t freecount;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2748) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2749)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2750) /* Record inode counts across all inobt records. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2751) STATIC int
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2752) xfs_ialloc_count_inodes_rec(
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2753) struct xfs_btree_cur *cur,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2754) union xfs_btree_rec *rec,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2755) void *priv)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2756) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2757) struct xfs_inobt_rec_incore irec;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2758) struct xfs_ialloc_count_inodes *ci = priv;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2759)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2760) xfs_inobt_btrec_to_irec(cur->bc_mp, rec, &irec);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2761) ci->count += irec.ir_count;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2762) ci->freecount += irec.ir_freecount;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2763)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2764) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2765) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2766)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2767) /* Count allocated and free inodes under an inobt. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2768) int
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2769) xfs_ialloc_count_inodes(
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2770) struct xfs_btree_cur *cur,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2771) xfs_agino_t *count,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2772) xfs_agino_t *freecount)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2773) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2774) struct xfs_ialloc_count_inodes ci = {0};
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2775) int error;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2776)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2777) ASSERT(cur->bc_btnum == XFS_BTNUM_INO);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2778) error = xfs_btree_query_all(cur, xfs_ialloc_count_inodes_rec, &ci);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2779) if (error)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2780) return error;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2781)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2782) *count = ci.count;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2783) *freecount = ci.freecount;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2784) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2785) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2786)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2787) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2788) * Initialize inode-related geometry information.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2789) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2790) * Compute the inode btree min and max levels and set maxicount.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2791) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2792) * Set the inode cluster size. This may still be overridden by the file
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2793) * system block size if it is larger than the chosen cluster size.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2794) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2795) * For v5 filesystems, scale the cluster size with the inode size to keep a
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2796) * constant ratio of inode per cluster buffer, but only if mkfs has set the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2797) * inode alignment value appropriately for larger cluster sizes.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2798) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2799) * Then compute the inode cluster alignment information.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2800) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2801) void
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2802) xfs_ialloc_setup_geometry(
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2803) struct xfs_mount *mp)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2804) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2805) struct xfs_sb *sbp = &mp->m_sb;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2806) struct xfs_ino_geometry *igeo = M_IGEO(mp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2807) uint64_t icount;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2808) uint inodes;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2809)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2810) igeo->new_diflags2 = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2811) if (xfs_sb_version_hasbigtime(&mp->m_sb))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2812) igeo->new_diflags2 |= XFS_DIFLAG2_BIGTIME;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2813)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2814) /* Compute inode btree geometry. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2815) igeo->agino_log = sbp->sb_inopblog + sbp->sb_agblklog;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2816) igeo->inobt_mxr[0] = xfs_inobt_maxrecs(mp, sbp->sb_blocksize, 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2817) igeo->inobt_mxr[1] = xfs_inobt_maxrecs(mp, sbp->sb_blocksize, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2818) igeo->inobt_mnr[0] = igeo->inobt_mxr[0] / 2;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2819) igeo->inobt_mnr[1] = igeo->inobt_mxr[1] / 2;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2820)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2821) igeo->ialloc_inos = max_t(uint16_t, XFS_INODES_PER_CHUNK,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2822) sbp->sb_inopblock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2823) igeo->ialloc_blks = igeo->ialloc_inos >> sbp->sb_inopblog;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2824)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2825) if (sbp->sb_spino_align)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2826) igeo->ialloc_min_blks = sbp->sb_spino_align;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2827) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2828) igeo->ialloc_min_blks = igeo->ialloc_blks;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2829)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2830) /* Compute and fill in value of m_ino_geo.inobt_maxlevels. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2831) inodes = (1LL << XFS_INO_AGINO_BITS(mp)) >> XFS_INODES_PER_CHUNK_LOG;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2832) igeo->inobt_maxlevels = xfs_btree_compute_maxlevels(igeo->inobt_mnr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2833) inodes);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2834)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2835) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2836) * Set the maximum inode count for this filesystem, being careful not
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2837) * to use obviously garbage sb_inopblog/sb_inopblock values. Regular
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2838) * users should never get here due to failing sb verification, but
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2839) * certain users (xfs_db) need to be usable even with corrupt metadata.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2840) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2841) if (sbp->sb_imax_pct && igeo->ialloc_blks) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2842) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2843) * Make sure the maximum inode count is a multiple
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2844) * of the units we allocate inodes in.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2845) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2846) icount = sbp->sb_dblocks * sbp->sb_imax_pct;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2847) do_div(icount, 100);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2848) do_div(icount, igeo->ialloc_blks);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2849) igeo->maxicount = XFS_FSB_TO_INO(mp,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2850) icount * igeo->ialloc_blks);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2851) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2852) igeo->maxicount = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2853) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2854)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2855) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2856) * Compute the desired size of an inode cluster buffer size, which
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2857) * starts at 8K and (on v5 filesystems) scales up with larger inode
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2858) * sizes.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2859) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2860) * Preserve the desired inode cluster size because the sparse inodes
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2861) * feature uses that desired size (not the actual size) to compute the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2862) * sparse inode alignment. The mount code validates this value, so we
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2863) * cannot change the behavior.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2864) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2865) igeo->inode_cluster_size_raw = XFS_INODE_BIG_CLUSTER_SIZE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2866) if (xfs_sb_version_has_v3inode(&mp->m_sb)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2867) int new_size = igeo->inode_cluster_size_raw;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2868)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2869) new_size *= mp->m_sb.sb_inodesize / XFS_DINODE_MIN_SIZE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2870) if (mp->m_sb.sb_inoalignmt >= XFS_B_TO_FSBT(mp, new_size))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2871) igeo->inode_cluster_size_raw = new_size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2872) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2873)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2874) /* Calculate inode cluster ratios. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2875) if (igeo->inode_cluster_size_raw > mp->m_sb.sb_blocksize)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2876) igeo->blocks_per_cluster = XFS_B_TO_FSBT(mp,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2877) igeo->inode_cluster_size_raw);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2878) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2879) igeo->blocks_per_cluster = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2880) igeo->inode_cluster_size = XFS_FSB_TO_B(mp, igeo->blocks_per_cluster);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2881) igeo->inodes_per_cluster = XFS_FSB_TO_INO(mp, igeo->blocks_per_cluster);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2882)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2883) /* Calculate inode cluster alignment. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2884) if (xfs_sb_version_hasalign(&mp->m_sb) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2885) mp->m_sb.sb_inoalignmt >= igeo->blocks_per_cluster)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2886) igeo->cluster_align = mp->m_sb.sb_inoalignmt;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2887) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2888) igeo->cluster_align = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2889) igeo->inoalign_mask = igeo->cluster_align - 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2890) igeo->cluster_align_inodes = XFS_FSB_TO_INO(mp, igeo->cluster_align);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2891)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2892) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2893) * If we are using stripe alignment, check whether
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2894) * the stripe unit is a multiple of the inode alignment
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2895) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2896) if (mp->m_dalign && igeo->inoalign_mask &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2897) !(mp->m_dalign & igeo->inoalign_mask))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2898) igeo->ialloc_align = mp->m_dalign;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2899) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2900) igeo->ialloc_align = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2901) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2902)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2903) /* Compute the location of the root directory inode that is laid out by mkfs. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2904) xfs_ino_t
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2905) xfs_ialloc_calc_rootino(
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2906) struct xfs_mount *mp,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2907) int sunit)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2908) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2909) struct xfs_ino_geometry *igeo = M_IGEO(mp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2910) xfs_agblock_t first_bno;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2911)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2912) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2913) * Pre-calculate the geometry of AG 0. We know what it looks like
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2914) * because libxfs knows how to create allocation groups now.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2915) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2916) * first_bno is the first block in which mkfs could possibly have
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2917) * allocated the root directory inode, once we factor in the metadata
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2918) * that mkfs formats before it. Namely, the four AG headers...
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2919) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2920) first_bno = howmany(4 * mp->m_sb.sb_sectsize, mp->m_sb.sb_blocksize);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2921)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2922) /* ...the two free space btree roots... */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2923) first_bno += 2;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2924)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2925) /* ...the inode btree root... */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2926) first_bno += 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2927)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2928) /* ...the initial AGFL... */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2929) first_bno += xfs_alloc_min_freelist(mp, NULL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2930)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2931) /* ...the free inode btree root... */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2932) if (xfs_sb_version_hasfinobt(&mp->m_sb))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2933) first_bno++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2934)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2935) /* ...the reverse mapping btree root... */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2936) if (xfs_sb_version_hasrmapbt(&mp->m_sb))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2937) first_bno++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2938)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2939) /* ...the reference count btree... */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2940) if (xfs_sb_version_hasreflink(&mp->m_sb))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2941) first_bno++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2942)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2943) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2944) * ...and the log, if it is allocated in the first allocation group.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2945) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2946) * This can happen with filesystems that only have a single
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2947) * allocation group, or very odd geometries created by old mkfs
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2948) * versions on very small filesystems.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2949) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2950) if (mp->m_sb.sb_logstart &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2951) XFS_FSB_TO_AGNO(mp, mp->m_sb.sb_logstart) == 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2952) first_bno += mp->m_sb.sb_logblocks;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2953)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2954) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2955) * Now round first_bno up to whatever allocation alignment is given
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2956) * by the filesystem or was passed in.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2957) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2958) if (xfs_sb_version_hasdalign(&mp->m_sb) && igeo->ialloc_align > 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2959) first_bno = roundup(first_bno, sunit);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2960) else if (xfs_sb_version_hasalign(&mp->m_sb) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2961) mp->m_sb.sb_inoalignmt > 1)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2962) first_bno = roundup(first_bno, mp->m_sb.sb_inoalignmt);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2963)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2964) return XFS_AGINO_TO_INO(mp, 0, XFS_AGB_TO_AGINO(mp, first_bno));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2965) }