// SPDX-License-Identifier: GPL-2.0+
/*
 * Copyright (C) 2017 Oracle. All Rights Reserved.
 * Author: Darrick J. Wong <darrick.wong@oracle.com>
 */
#ifndef __XFS_SCRUB_COMMON_H__
#define __XFS_SCRUB_COMMON_H__

/*
 * We /could/ terminate a scrub/repair operation early.  If we're not
 * in a good place to continue (fatal signal, etc.) then bail out.
 * Note that we're careful not to make any judgements about *error.
 */
static inline bool
xchk_should_terminate(
	struct xfs_scrub	*sc,
	int			*error)
{
	/*
	 * If preemption is disabled, we need to yield to the scheduler every
	 * few seconds so that we don't run afoul of the soft lockup watchdog
	 * or RCU stall detector.
	 */
	cond_resched();

	if (fatal_signal_pending(current)) {
		if (*error == 0)
			*error = -EAGAIN;
		return true;
	}
	return false;
}
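
/*
 * Typical use in a long-running scan loop (an illustrative sketch; the
 * iteration and xchk_foo_check_record() are hypothetical):
 *
 *	while (has_more_records) {
 *		if (xchk_should_terminate(sc, &error))
 *			break;
 *		error = xchk_foo_check_record(sc);
 *		...
 *	}
 *
 * If a fatal signal is pending, the loop exits with *error set to
 * -EAGAIN unless the caller already had an error to report.
 */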

int xchk_trans_alloc(struct xfs_scrub *sc, uint resblks);
bool xchk_process_error(struct xfs_scrub *sc, xfs_agnumber_t agno,
		xfs_agblock_t bno, int *error);
bool xchk_fblock_process_error(struct xfs_scrub *sc, int whichfork,
		xfs_fileoff_t offset, int *error);

bool xchk_xref_process_error(struct xfs_scrub *sc,
		xfs_agnumber_t agno, xfs_agblock_t bno, int *error);
bool xchk_fblock_xref_process_error(struct xfs_scrub *sc,
		int whichfork, xfs_fileoff_t offset, int *error);
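
/*
 * Sketch of the usual calling convention; the read call below stands in
 * for any operation that can fail:
 *
 *	error = xfs_alloc_read_agf(mp, sc->tp, agno, 0, &agf_bp);
 *	if (!xchk_process_error(sc, agno, bno, &error))
 *		return error;
 *
 * The helper returns true only when there was no error and checking can
 * proceed; otherwise it records the problem in the scrub state and the
 * caller backs out with whatever remains in *error.
 */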

void xchk_block_set_preen(struct xfs_scrub *sc,
		struct xfs_buf *bp);
void xchk_ino_set_preen(struct xfs_scrub *sc, xfs_ino_t ino);

void xchk_set_corrupt(struct xfs_scrub *sc);
void xchk_block_set_corrupt(struct xfs_scrub *sc,
		struct xfs_buf *bp);
void xchk_ino_set_corrupt(struct xfs_scrub *sc, xfs_ino_t ino);
void xchk_fblock_set_corrupt(struct xfs_scrub *sc, int whichfork,
		xfs_fileoff_t offset);

void xchk_block_xref_set_corrupt(struct xfs_scrub *sc,
		struct xfs_buf *bp);
void xchk_ino_xref_set_corrupt(struct xfs_scrub *sc,
		xfs_ino_t ino);
void xchk_fblock_xref_set_corrupt(struct xfs_scrub *sc,
		int whichfork, xfs_fileoff_t offset);

void xchk_ino_set_warning(struct xfs_scrub *sc, xfs_ino_t ino);
void xchk_fblock_set_warning(struct xfs_scrub *sc, int whichfork,
		xfs_fileoff_t offset);

void xchk_set_incomplete(struct xfs_scrub *sc);
int xchk_checkpoint_log(struct xfs_mount *mp);

/* Are we set up for a cross-referencing check? */
bool xchk_should_check_xref(struct xfs_scrub *sc, int *error,
		struct xfs_btree_cur **curpp);
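
/*
 * Sketch of how a cross-reference helper uses this, loosely modelled on
 * the used-space check (the btree query and the corruption test are
 * illustrative):
 *
 *	error = xfs_alloc_has_record(sc->sa.bno_cur, agbno, len, &is_freesp);
 *	if (!xchk_should_check_xref(sc, &error, &sc->sa.bno_cur))
 *		return;
 *	if (is_freesp)
 *		xchk_block_xref_set_corrupt(sc, sc->sa.agf_bp);
 *
 * On error this tears down the cursor and marks the xref incomplete, so
 * the helper can simply return without reporting anything.
 */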

/* Setup functions */
int xchk_setup_fs(struct xfs_scrub *sc, struct xfs_inode *ip);
int xchk_setup_ag_allocbt(struct xfs_scrub *sc,
		struct xfs_inode *ip);
int xchk_setup_ag_iallocbt(struct xfs_scrub *sc,
		struct xfs_inode *ip);
int xchk_setup_ag_rmapbt(struct xfs_scrub *sc,
		struct xfs_inode *ip);
int xchk_setup_ag_refcountbt(struct xfs_scrub *sc,
		struct xfs_inode *ip);
int xchk_setup_inode(struct xfs_scrub *sc,
		struct xfs_inode *ip);
int xchk_setup_inode_bmap(struct xfs_scrub *sc,
		struct xfs_inode *ip);
int xchk_setup_inode_bmap_data(struct xfs_scrub *sc,
		struct xfs_inode *ip);
int xchk_setup_directory(struct xfs_scrub *sc,
		struct xfs_inode *ip);
int xchk_setup_xattr(struct xfs_scrub *sc,
		struct xfs_inode *ip);
int xchk_setup_symlink(struct xfs_scrub *sc,
		struct xfs_inode *ip);
int xchk_setup_parent(struct xfs_scrub *sc,
		struct xfs_inode *ip);
#ifdef CONFIG_XFS_RT
int xchk_setup_rt(struct xfs_scrub *sc, struct xfs_inode *ip);
#else
static inline int
xchk_setup_rt(struct xfs_scrub *sc, struct xfs_inode *ip)
{
	return -ENOENT;
}
#endif
#ifdef CONFIG_XFS_QUOTA
int xchk_setup_quota(struct xfs_scrub *sc, struct xfs_inode *ip);
#else
static inline int
xchk_setup_quota(struct xfs_scrub *sc, struct xfs_inode *ip)
{
	return -ENOENT;
}
#endif
int xchk_setup_fscounters(struct xfs_scrub *sc, struct xfs_inode *ip);

void xchk_ag_free(struct xfs_scrub *sc, struct xchk_ag *sa);
int xchk_ag_init(struct xfs_scrub *sc, xfs_agnumber_t agno,
		struct xchk_ag *sa);
void xchk_perag_get(struct xfs_mount *mp, struct xchk_ag *sa);
int xchk_ag_read_headers(struct xfs_scrub *sc, xfs_agnumber_t agno,
		struct xfs_buf **agi, struct xfs_buf **agf,
		struct xfs_buf **agfl);
void xchk_ag_btcur_free(struct xchk_ag *sa);
int xchk_ag_btcur_init(struct xfs_scrub *sc, struct xchk_ag *sa);
int xchk_count_rmap_ownedby_ag(struct xfs_scrub *sc, struct xfs_btree_cur *cur,
		const struct xfs_owner_info *oinfo, xfs_filblks_t *blocks);

int xchk_setup_ag_btree(struct xfs_scrub *sc, struct xfs_inode *ip,
		bool force_log);
int xchk_get_inode(struct xfs_scrub *sc, struct xfs_inode *ip_in);
int xchk_setup_inode_contents(struct xfs_scrub *sc, struct xfs_inode *ip,
		unsigned int resblks);
void xchk_buffer_recheck(struct xfs_scrub *sc, struct xfs_buf *bp);

/*
 * Don't bother cross-referencing if we already found corruption or
 * cross-referencing discrepancies.
 */
static inline bool xchk_skip_xref(struct xfs_scrub_metadata *sm)
{
	return sm->sm_flags & (XFS_SCRUB_OFLAG_CORRUPT |
			       XFS_SCRUB_OFLAG_XCORRUPT);
}
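
/*
 * A cross-reference helper typically guards itself with this before
 * touching any cursors, e.g. (sketch):
 *
 *	if (!sc->sa.bno_cur || xchk_skip_xref(sc->sm))
 *		return;
 */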

int xchk_metadata_inode_forks(struct xfs_scrub *sc);
int xchk_ilock_inverted(struct xfs_inode *ip, uint lock_mode);
void xchk_stop_reaping(struct xfs_scrub *sc);
void xchk_start_reaping(struct xfs_scrub *sc);

#endif	/* __XFS_SCRUB_COMMON_H__ */