Orange Pi5 kernel

Deprecated Linux kernel 5.10.110 for OrangePi 5/5B/5+ boards

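The source below is the XFS reverse-mapping btree scrubber, fs/xfs/scrub/rmap.c, as shipped in this 5.10.110 tree.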
// SPDX-License-Identifier: GPL-2.0+
/*
 * Copyright (C) 2017 Oracle.  All Rights Reserved.
 * Author: Darrick J. Wong <darrick.wong@oracle.com>
 */
#include "xfs.h"
#include "xfs_fs.h"
#include "xfs_shared.h"
#include "xfs_format.h"
#include "xfs_trans_resv.h"
#include "xfs_mount.h"
#include "xfs_btree.h"
#include "xfs_rmap.h"
#include "xfs_refcount.h"
#include "scrub/scrub.h"
#include "scrub/common.h"
#include "scrub/btree.h"

/*
 * Set us up to scrub reverse mapping btrees.
 */
int
xchk_setup_ag_rmapbt(
	struct xfs_scrub	*sc,
	struct xfs_inode	*ip)
{
	return xchk_setup_ag_btree(sc, ip, false);
}

/* Reverse-mapping scrubber. */

/* Cross-reference a rmap against the refcount btree. */
STATIC void
xchk_rmapbt_xref_refc(
	struct xfs_scrub	*sc,
	struct xfs_rmap_irec	*irec)
{
	xfs_agblock_t		fbno;
	xfs_extlen_t		flen;
	bool			non_inode;
	bool			is_bmbt;
	bool			is_attr;
	bool			is_unwritten;
	int			error;

	if (!sc->sa.refc_cur || xchk_skip_xref(sc->sm))
		return;

	non_inode = XFS_RMAP_NON_INODE_OWNER(irec->rm_owner);
	is_bmbt = irec->rm_flags & XFS_RMAP_BMBT_BLOCK;
	is_attr = irec->rm_flags & XFS_RMAP_ATTR_FORK;
	is_unwritten = irec->rm_flags & XFS_RMAP_UNWRITTEN;

	/* If this is shared, must be a data fork extent. */
	error = xfs_refcount_find_shared(sc->sa.refc_cur, irec->rm_startblock,
			irec->rm_blockcount, &fbno, &flen, false);
	if (!xchk_should_check_xref(sc, &error, &sc->sa.refc_cur))
		return;
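	/*
	 * Any shared overlap means this must be a plain written data fork
	 * extent owned by an inode; bmbt blocks, attr fork extents,
	 * unwritten extents, and non-inode owners can never be reflinked.
	 */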
	if (flen != 0 && (non_inode || is_attr || is_bmbt || is_unwritten))
		xchk_btree_xref_set_corrupt(sc, sc->sa.refc_cur, 0);
}

/* Cross-reference with the other btrees. */
STATIC void
xchk_rmapbt_xref(
	struct xfs_scrub	*sc,
	struct xfs_rmap_irec	*irec)
{
	xfs_agblock_t		agbno = irec->rm_startblock;
	xfs_extlen_t		len = irec->rm_blockcount;

	if (sc->sm->sm_flags & XFS_SCRUB_OFLAG_CORRUPT)
		return;

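	/*
	 * Every rmapped extent must be marked used (not free) in the space
	 * btrees; OWN_INODES records must line up with inode chunks;
	 * OWN_COW records must be CoW staging extents; everything else is
	 * checked against the refcount btree.
	 */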
	xchk_xref_is_used_space(sc, agbno, len);
	if (irec->rm_owner == XFS_RMAP_OWN_INODES)
		xchk_xref_is_inode_chunk(sc, agbno, len);
	else
		xchk_xref_is_not_inode_chunk(sc, agbno, len);
	if (irec->rm_owner == XFS_RMAP_OWN_COW)
		xchk_xref_is_cow_staging(sc, irec->rm_startblock,
				irec->rm_blockcount);
	else
		xchk_rmapbt_xref_refc(sc, irec);
}

/* Scrub an rmapbt record. */
STATIC int
xchk_rmapbt_rec(
	struct xchk_btree	*bs,
	union xfs_btree_rec	*rec)
{
	struct xfs_mount	*mp = bs->cur->bc_mp;
	struct xfs_rmap_irec	irec;
	xfs_agnumber_t		agno = bs->cur->bc_ag.agno;
	bool			non_inode;
	bool			is_unwritten;
	bool			is_bmbt;
	bool			is_attr;
	int			error;

	error = xfs_rmap_btrec_to_irec(rec, &irec);
	if (!xchk_btree_process_error(bs->sc, bs->cur, 0, &error))
		goto out;

	/* Check extent. */
	if (irec.rm_startblock + irec.rm_blockcount <= irec.rm_startblock)
		xchk_btree_set_corrupt(bs->sc, bs->cur, 0);

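	/*
	 * XFS_RMAP_OWN_FS covers the static AG headers (superblock, AGF,
	 * AGI, and AGFL) that occupy the first blocks of every AG.
	 */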
	if (irec.rm_owner == XFS_RMAP_OWN_FS) {
		/*
		 * xfs_verify_agbno returns false for static fs metadata.
		 * Since that only exists at the start of the AG, validate
		 * that by hand.
		 */
		if (irec.rm_startblock != 0 ||
		    irec.rm_blockcount != XFS_AGFL_BLOCK(mp) + 1)
			xchk_btree_set_corrupt(bs->sc, bs->cur, 0);
	} else {
		/*
		 * Otherwise we must point somewhere past the static metadata
		 * but before the end of the FS.  Run the regular check.
		 */
		if (!xfs_verify_agbno(mp, agno, irec.rm_startblock) ||
		    !xfs_verify_agbno(mp, agno, irec.rm_startblock +
				irec.rm_blockcount - 1))
			xchk_btree_set_corrupt(bs->sc, bs->cur, 0);
	}

	/* Check flags. */
	non_inode = XFS_RMAP_NON_INODE_OWNER(irec.rm_owner);
	is_bmbt = irec.rm_flags & XFS_RMAP_BMBT_BLOCK;
	is_attr = irec.rm_flags & XFS_RMAP_ATTR_FORK;
	is_unwritten = irec.rm_flags & XFS_RMAP_UNWRITTEN;

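	/*
	 * bmbt blocks and non-inode owners carry no logical file offset,
	 * the unwritten flag only makes sense on data fork extents, and
	 * non-inode owners take no flags at all.
	 */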
	if (is_bmbt && irec.rm_offset != 0)
		xchk_btree_set_corrupt(bs->sc, bs->cur, 0);

	if (non_inode && irec.rm_offset != 0)
		xchk_btree_set_corrupt(bs->sc, bs->cur, 0);

	if (is_unwritten && (is_bmbt || non_inode || is_attr))
		xchk_btree_set_corrupt(bs->sc, bs->cur, 0);

	if (non_inode && (is_bmbt || is_unwritten || is_attr))
		xchk_btree_set_corrupt(bs->sc, bs->cur, 0);

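	/*
	 * Inode owners must be valid inode numbers; non-inode owners must
	 * use one of the reserved metadata owner codes.
	 */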
	if (!non_inode) {
		if (!xfs_verify_ino(mp, irec.rm_owner))
			xchk_btree_set_corrupt(bs->sc, bs->cur, 0);
	} else {
		/* Non-inode owner within the magic values? */
		if (irec.rm_owner <= XFS_RMAP_OWN_MIN ||
		    irec.rm_owner > XFS_RMAP_OWN_FS)
			xchk_btree_set_corrupt(bs->sc, bs->cur, 0);
	}

	xchk_rmapbt_xref(bs->sc, &irec);
out:
	return error;
}

/* Scrub the rmap btree for some AG. */
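/* The rmapbt's own blocks are owned by XFS_RMAP_OWN_AG, hence the owner info passed here. */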
int
xchk_rmapbt(
	struct xfs_scrub	*sc)
{
	return xchk_btree(sc, sc->sa.rmap_cur, xchk_rmapbt_rec,
			&XFS_RMAP_OINFO_AG, NULL);
}

/* xref check that the extent is owned by a given owner */
static inline void
xchk_xref_check_owner(
	struct xfs_scrub		*sc,
	xfs_agblock_t			bno,
	xfs_extlen_t			len,
	const struct xfs_owner_info	*oinfo,
	bool				should_have_rmap)
{
	bool				has_rmap;
	int				error;

	if (!sc->sa.rmap_cur || xchk_skip_xref(sc->sm))
		return;

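	/*
	 * Look up whether an rmap record for this owner covers the extent
	 * and flag cross-referencing corruption if its presence doesn't
	 * match the caller's expectation.
	 */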
	error = xfs_rmap_record_exists(sc->sa.rmap_cur, bno, len, oinfo,
			&has_rmap);
	if (!xchk_should_check_xref(sc, &error, &sc->sa.rmap_cur))
		return;
	if (has_rmap != should_have_rmap)
		xchk_btree_xref_set_corrupt(sc, sc->sa.rmap_cur, 0);
}

/* xref check that the extent is owned by a given owner */
void
xchk_xref_is_owned_by(
	struct xfs_scrub		*sc,
	xfs_agblock_t			bno,
	xfs_extlen_t			len,
	const struct xfs_owner_info	*oinfo)
{
	xchk_xref_check_owner(sc, bno, len, oinfo, true);
}

/* xref check that the extent is not owned by a given owner */
void
xchk_xref_is_not_owned_by(
	struct xfs_scrub		*sc,
	xfs_agblock_t			bno,
	xfs_extlen_t			len,
	const struct xfs_owner_info	*oinfo)
{
	xchk_xref_check_owner(sc, bno, len, oinfo, false);
}

/* xref check that the extent has no reverse mapping at all */
void
xchk_xref_has_no_owner(
	struct xfs_scrub	*sc,
	xfs_agblock_t		bno,
	xfs_extlen_t		len)
{
	bool			has_rmap;
	int			error;

	if (!sc->sa.rmap_cur || xchk_skip_xref(sc->sm))
		return;

	error = xfs_rmap_has_record(sc->sa.rmap_cur, bno, len, &has_rmap);
	if (!xchk_should_check_xref(sc, &error, &sc->sa.rmap_cur))
		return;
	if (has_rmap)
		xchk_btree_xref_set_corrupt(sc, sc->sa.rmap_cur, 0);
}