Orange Pi 5 kernel

Deprecated Linux kernel 5.10.110 for OrangePi 5/5B/5+ boards

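File: fs/xfs/xfs_log_recover.c (log recovery head-search routines)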
// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (c) 2000-2006 Silicon Graphics, Inc.
 * All Rights Reserved.
 */
#include "xfs.h"
#include "xfs_fs.h"
#include "xfs_shared.h"
#include "xfs_format.h"
#include "xfs_log_format.h"
#include "xfs_trans_resv.h"
#include "xfs_bit.h"
#include "xfs_sb.h"
#include "xfs_mount.h"
#include "xfs_defer.h"
#include "xfs_inode.h"
#include "xfs_trans.h"
#include "xfs_log.h"
#include "xfs_log_priv.h"
#include "xfs_log_recover.h"
#include "xfs_trans_priv.h"
#include "xfs_alloc.h"
#include "xfs_ialloc.h"
#include "xfs_trace.h"
#include "xfs_icache.h"
#include "xfs_error.h"
#include "xfs_buf_item.h"

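/* Midpoint of two log block numbers, used by the binary searches below. */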
#define BLK_AVG(blk1, blk2)	((blk1+blk2) >> 1)

STATIC int
xlog_find_zeroed(
	struct xlog	*,
	xfs_daddr_t	*);
STATIC int
xlog_clear_stale_blocks(
	struct xlog	*,
	xfs_lsn_t);
#if defined(DEBUG)
STATIC void
xlog_recover_check_summary(
	struct xlog *);
#else
#define	xlog_recover_check_summary(log)
#endif
STATIC int
xlog_do_recovery_pass(
	struct xlog *, xfs_daddr_t, xfs_daddr_t, int, xfs_daddr_t *);

/*
 * Sector aligned buffer routines for buffer create/read/write/access
 */

/*
 * Verify the log-relative block number and length in basic blocks are valid for
 * an operation involving the given XFS log buffer. Returns true if the fields
 * are valid, false otherwise.
 */
static inline bool
xlog_verify_bno(
	struct xlog	*log,
	xfs_daddr_t	blk_no,
	int		bbcount)
{
	if (blk_no < 0 || blk_no >= log->l_logBBsize)
		return false;
	if (bbcount <= 0 || (blk_no + bbcount) > log->l_logBBsize)
		return false;
	return true;
}

/*
 * Allocate a buffer to hold log data.  The buffer needs to be able to map to
 * a range of nbblks basic blocks at any valid offset within the log.
 */
static char *
xlog_alloc_buffer(
	struct xlog	*log,
	int		nbblks)
{
	int align_mask = xfs_buftarg_dma_alignment(log->l_targ);

	/*
	 * Pass log block 0 since we don't have an addr yet, buffer will be
	 * verified on read.
	 */
	if (XFS_IS_CORRUPT(log->l_mp, !xlog_verify_bno(log, 0, nbblks))) {
		xfs_warn(log->l_mp, "Invalid block length (0x%x) for buffer",
			nbblks);
		return NULL;
	}

	/*
	 * We do log I/O in units of log sectors (a power-of-2 multiple of the
	 * basic block size), so we round up the requested size to accommodate
	 * the basic blocks required for complete log sectors.
	 *
	 * In addition, the buffer may be used for a non-sector-aligned block
	 * offset, in which case an I/O of the requested size could extend
	 * beyond the end of the buffer.  If the requested size is only 1 basic
	 * block it will never straddle a sector boundary, so this won't be an
	 * issue.  Nor will this be a problem if the log I/O is done in basic
	 * blocks (sector size 1).  But otherwise we extend the buffer by one
	 * extra log sector to ensure there's space to accommodate this
	 * possibility.
	 */
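	/*
	 * Worked example (illustrative numbers): with 2k log sectors
	 * (l_sectBBsize == 4), a request for 6 basic blocks becomes
	 * 6 + 4 = 10, rounded up to 12 basic blocks (6k) below.
	 */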
	if (nbblks > 1 && log->l_sectBBsize > 1)
		nbblks += log->l_sectBBsize;
	nbblks = round_up(nbblks, log->l_sectBBsize);
	return kmem_alloc_io(BBTOB(nbblks), align_mask, KM_MAYFAIL | KM_ZERO);
}

/*
 * Return the byte offset of the start of the given block number's data
 * within a log buffer.  The buffer covers a log sector-aligned region.
 */
static inline unsigned int
xlog_align(
	struct xlog	*log,
	xfs_daddr_t	blk_no)
{
	return BBTOB(blk_no & ((xfs_daddr_t)log->l_sectBBsize - 1));
}

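/*
 * Do a raw I/O against the log device.  The request is rounded out to
 * whole log sectors before being submitted.
 */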
static int
xlog_do_io(
	struct xlog		*log,
	xfs_daddr_t		blk_no,
	unsigned int		nbblks,
	char			*data,
	unsigned int		op)
{
	int			error;

	if (XFS_IS_CORRUPT(log->l_mp, !xlog_verify_bno(log, blk_no, nbblks))) {
		xfs_warn(log->l_mp,
			 "Invalid log block/length (0x%llx, 0x%x) for buffer",
			 blk_no, nbblks);
		return -EFSCORRUPTED;
	}

	blk_no = round_down(blk_no, log->l_sectBBsize);
	nbblks = round_up(nbblks, log->l_sectBBsize);
	ASSERT(nbblks > 0);

	error = xfs_rw_bdev(log->l_targ->bt_bdev, log->l_logBBstart + blk_no,
			BBTOB(nbblks), data, op);
	if (error && !XFS_FORCED_SHUTDOWN(log->l_mp)) {
		xfs_alert(log->l_mp,
			  "log recovery %s I/O error at daddr 0x%llx len %d error %d",
			  op == REQ_OP_WRITE ? "write" : "read",
			  blk_no, nbblks, error);
	}
	return error;
}

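/* Read nbblks basic blocks into a sector-aligned buffer. */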
STATIC int
xlog_bread_noalign(
	struct xlog	*log,
	xfs_daddr_t	blk_no,
	int		nbblks,
	char		*data)
{
	return xlog_do_io(log, blk_no, nbblks, data, REQ_OP_READ);
}

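/*
 * Read nbblks basic blocks into a sector-aligned buffer and return, in
 * @offset, a pointer to the start of the requested block's data within it.
 */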
STATIC int
xlog_bread(
	struct xlog	*log,
	xfs_daddr_t	blk_no,
	int		nbblks,
	char		*data,
	char		**offset)
{
	int		error;

	error = xlog_do_io(log, blk_no, nbblks, data, REQ_OP_READ);
	if (!error)
		*offset = data + xlog_align(log, blk_no);
	return error;
}

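/* Write nbblks basic blocks from a sector-aligned buffer. */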
STATIC int
xlog_bwrite(
	struct xlog	*log,
	xfs_daddr_t	blk_no,
	int		nbblks,
	char		*data)
{
	return xlog_do_io(log, blk_no, nbblks, data, REQ_OP_WRITE);
}

#ifdef DEBUG
/*
 * dump debug superblock and log record information
 */
STATIC void
xlog_header_check_dump(
	xfs_mount_t		*mp,
	xlog_rec_header_t	*head)
{
	xfs_debug(mp, "%s:  SB : uuid = %pU, fmt = %d",
		__func__, &mp->m_sb.sb_uuid, XLOG_FMT);
	xfs_debug(mp, "    log : uuid = %pU, fmt = %d",
		&head->h_fs_uuid, be32_to_cpu(head->h_fmt));
}
#else
#define xlog_header_check_dump(mp, head)
#endif

/*
 * check log record header for recovery
 */
STATIC int
xlog_header_check_recover(
	xfs_mount_t		*mp,
	xlog_rec_header_t	*head)
{
	ASSERT(head->h_magicno == cpu_to_be32(XLOG_HEADER_MAGIC_NUM));

	/*
	 * IRIX doesn't write the h_fmt field and leaves it zeroed
	 * (XLOG_FMT_UNKNOWN). This stops us from trying to recover
	 * a dirty log created in IRIX.
	 */
	if (XFS_IS_CORRUPT(mp, head->h_fmt != cpu_to_be32(XLOG_FMT))) {
		xfs_warn(mp,
	"dirty log written in incompatible format - can't recover");
		xlog_header_check_dump(mp, head);
		return -EFSCORRUPTED;
	}
	if (XFS_IS_CORRUPT(mp, !uuid_equal(&mp->m_sb.sb_uuid,
					   &head->h_fs_uuid))) {
		xfs_warn(mp,
	"dirty log entry has mismatched uuid - can't recover");
		xlog_header_check_dump(mp, head);
		return -EFSCORRUPTED;
	}
	return 0;
}

/*
 * check a log record header's uuid against the mounted filesystem
 */
STATIC int
xlog_header_check_mount(
	xfs_mount_t		*mp,
	xlog_rec_header_t	*head)
{
	ASSERT(head->h_magicno == cpu_to_be32(XLOG_HEADER_MAGIC_NUM));

	if (uuid_is_null(&head->h_fs_uuid)) {
		/*
		 * IRIX doesn't write the h_fs_uuid or h_fmt fields. If
		 * h_fs_uuid is null, we assume this log was last mounted
		 * by IRIX and continue.
		 */
		xfs_warn(mp, "null uuid in log - IRIX style log");
	} else if (XFS_IS_CORRUPT(mp, !uuid_equal(&mp->m_sb.sb_uuid,
						  &head->h_fs_uuid))) {
		xfs_warn(mp, "log has mismatched uuid - can't recover");
		xlog_header_check_dump(mp, head);
		return -EFSCORRUPTED;
	}
	return 0;
}

/*
 * This routine finds (to an approximation) the first block in the physical
 * log which contains the given cycle.  It uses a binary search algorithm.
 * Note that the algorithm cannot be exact because the on-disk cycle data
 * may not be perfectly consistent.
 */
STATIC int
xlog_find_cycle_start(
	struct xlog	*log,
	char		*buffer,
	xfs_daddr_t	first_blk,
	xfs_daddr_t	*last_blk,
	uint		cycle)
{
	char		*offset;
	xfs_daddr_t	mid_blk;
	xfs_daddr_t	end_blk;
	uint		mid_cycle;
	int		error;

	end_blk = *last_blk;
	mid_blk = BLK_AVG(first_blk, end_blk);
	while (mid_blk != first_blk && mid_blk != end_blk) {
		error = xlog_bread(log, mid_blk, 1, buffer, &offset);
		if (error)
			return error;
		mid_cycle = xlog_get_cycle(offset);
		if (mid_cycle == cycle)
			end_blk = mid_blk;   /* last_half_cycle == mid_cycle */
		else
			first_blk = mid_blk; /* first_half_cycle == mid_cycle */
		mid_blk = BLK_AVG(first_blk, end_blk);
	}
	ASSERT((mid_blk == first_blk && mid_blk+1 == end_blk) ||
	       (mid_blk == end_blk && mid_blk-1 == first_blk));

	*last_blk = end_blk;

	return 0;
}

/*
 * Check that a range of blocks does not contain stop_on_cycle_no.
 * Fill in *new_blk with the block offset where such a block is
 * found, or with -1 (an invalid block number) if there is no such
 * block in the range.  The scan needs to occur from front to back
 * and the pointer into the region must be updated since a later
 * routine will need to perform another test.
 */
STATIC int
xlog_find_verify_cycle(
	struct xlog	*log,
	xfs_daddr_t	start_blk,
	int		nbblks,
	uint		stop_on_cycle_no,
	xfs_daddr_t	*new_blk)
{
	xfs_daddr_t	i, j;
	uint		cycle;
	char		*buffer;
	xfs_daddr_t	bufblks;
	char		*buf = NULL;
	int		error = 0;

	/*
	 * Greedily allocate a buffer big enough to handle the full
	 * range of basic blocks we'll be examining.  If that fails,
	 * try a smaller size.  We need to be able to read at least
	 * a log sector, or we're out of luck.
	 */
	bufblks = 1 << ffs(nbblks);
	while (bufblks > log->l_logBBsize)
		bufblks >>= 1;
	while (!(buffer = xlog_alloc_buffer(log, bufblks))) {
		bufblks >>= 1;
		if (bufblks < log->l_sectBBsize)
			return -ENOMEM;
	}

	for (i = start_blk; i < start_blk + nbblks; i += bufblks) {
		int	bcount;

		bcount = min(bufblks, (start_blk + nbblks - i));

		error = xlog_bread(log, i, bcount, buffer, &buf);
		if (error)
			goto out;

		for (j = 0; j < bcount; j++) {
			cycle = xlog_get_cycle(buf);
			if (cycle == stop_on_cycle_no) {
				*new_blk = i+j;
				goto out;
			}

			buf += BBSIZE;
		}
	}

	*new_blk = -1;

out:
	kmem_free(buffer);
	return error;
}

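/*
 * Return the number of header basic blocks at the start of a log record.
 * Version 2 logs with a record size above XLOG_HEADER_CYCLE_SIZE (32k)
 * use one header block per 32k chunk of the record.
 */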
static inline int
xlog_logrec_hblks(struct xlog *log, struct xlog_rec_header *rh)
{
	if (xfs_sb_version_haslogv2(&log->l_mp->m_sb)) {
		int	h_size = be32_to_cpu(rh->h_size);

		if ((be32_to_cpu(rh->h_version) & XLOG_VERSION_2) &&
		    h_size > XLOG_HEADER_CYCLE_SIZE)
			return DIV_ROUND_UP(h_size, XLOG_HEADER_CYCLE_SIZE);
	}
	return 1;
}

/*
 * Potentially backup over partial log record write.
 *
 * In the typical case, last_blk is the number of the block directly after
 * a good log record.  Therefore, we subtract one to get the block number
 * of the last block in the given buffer.  extra_bblks contains the number
 * of blocks we would have read on a previous read.  This happens when the
 * last log record is split over the end of the physical log.
 *
 * extra_bblks is the number of blocks potentially verified on a previous
 * call to this routine.
 */
STATIC int
xlog_find_verify_log_record(
	struct xlog		*log,
	xfs_daddr_t		start_blk,
	xfs_daddr_t		*last_blk,
	int			extra_bblks)
{
	xfs_daddr_t		i;
	char			*buffer;
	char			*offset = NULL;
	xlog_rec_header_t	*head = NULL;
	int			error = 0;
	int			smallmem = 0;
	int			num_blks = *last_blk - start_blk;
	int			xhdrs;

	ASSERT(start_blk != 0 || *last_blk != start_blk);

	buffer = xlog_alloc_buffer(log, num_blks);
	if (!buffer) {
		buffer = xlog_alloc_buffer(log, 1);
		if (!buffer)
			return -ENOMEM;
		smallmem = 1;
	} else {
		error = xlog_bread(log, start_blk, num_blks, buffer, &offset);
		if (error)
			goto out;
		offset += ((num_blks - 1) << BBSHIFT);
	}

	for (i = (*last_blk) - 1; i >= 0; i--) {
		if (i < start_blk) {
			/* valid log record not found */
			xfs_warn(log->l_mp,
		"Log inconsistent (didn't find previous header)");
			ASSERT(0);
			error = -EFSCORRUPTED;
			goto out;
		}

		if (smallmem) {
			error = xlog_bread(log, i, 1, buffer, &offset);
			if (error)
				goto out;
		}

		head = (xlog_rec_header_t *)offset;

		if (head->h_magicno == cpu_to_be32(XLOG_HEADER_MAGIC_NUM))
			break;

		if (!smallmem)
			offset -= BBSIZE;
	}

	/*
	 * We hit the beginning of the physical log & still no header.  Return
	 * to caller.  If the caller can handle a return of 1, then this
	 * routine will be called again for the end of the physical log.
	 */
	if (i == -1) {
		error = 1;
		goto out;
	}

	/*
	 * We have the final block of the good log (the first block
	 * of the log record _before_ the head), so we check the uuid.
	 */
	if ((error = xlog_header_check_mount(log->l_mp, head)))
		goto out;

	/*
	 * We may have found a log record header before we expected one.
	 * last_blk will be the 1st block # with a given cycle #.  We may end
	 * up reading an entire log record.  In this case, we don't want to
	 * reset last_blk.  Only when last_blk points in the middle of a log
	 * record do we update last_blk.
	 */
	xhdrs = xlog_logrec_hblks(log, head);

	if (*last_blk - i + extra_bblks !=
	    BTOBB(be32_to_cpu(head->h_len)) + xhdrs)
		*last_blk = i;

out:
	kmem_free(buffer);
	return error;
}

/*
 * Head is defined to be the point of the log where the next log write
 * could go.  This means that incomplete LR writes at the end are
 * eliminated when calculating the head.  We aren't guaranteed that previous
 * LRs have complete transactions.  We only know that a cycle number of
 * current cycle number -1 won't be present in the log if we start writing
 * from our current block number.
 *
 * last_blk contains the block number of the first block with a given
 * cycle number.
 *
 * Return: zero if normal, non-zero if error.
 */
STATIC int
xlog_find_head(
	struct xlog	*log,
	xfs_daddr_t	*return_head_blk)
{
	char		*buffer;
	char		*offset;
	xfs_daddr_t	new_blk, first_blk, start_blk, last_blk, head_blk;
	int		num_scan_bblks;
	uint		first_half_cycle, last_half_cycle;
	uint		stop_on_cycle;
	int		error, log_bbnum = log->l_logBBsize;

	/* Is the end of the log device zeroed? */
	error = xlog_find_zeroed(log, &first_blk);
	if (error < 0) {
		xfs_warn(log->l_mp, "empty log check failed");
		return error;
	}
	if (error == 1) {
		*return_head_blk = first_blk;

		/* Is the whole lot zeroed? */
		if (!first_blk) {
			/* Linux XFS shouldn't generate totally zeroed logs -
			 * mkfs etc write a dummy unmount record to a fresh
			 * log so we can store the uuid in there
			 */
			xfs_warn(log->l_mp, "totally zeroed log");
		}

		return 0;
	}

	first_blk = 0;			/* get cycle # of 1st block */
	buffer = xlog_alloc_buffer(log, 1);
	if (!buffer)
		return -ENOMEM;

	error = xlog_bread(log, 0, 1, buffer, &offset);
	if (error)
		goto out_free_buffer;

	first_half_cycle = xlog_get_cycle(offset);

	last_blk = head_blk = log_bbnum - 1;	/* get cycle # of last block */
	error = xlog_bread(log, last_blk, 1, buffer, &offset);
	if (error)
		goto out_free_buffer;

	last_half_cycle = xlog_get_cycle(offset);
	ASSERT(last_half_cycle != 0);

	/*
	 * If the 1st half cycle number is equal to the last half cycle number,
	 * then the entire log is stamped with the same cycle number.  In this
	 * case, head_blk can't be set to zero (which makes sense).  The below
	 * math doesn't work out properly with head_blk equal to zero.  Instead,
	 * we set it to log_bbnum which is an invalid block number, but this
	 * value makes the math correct.  If head_blk doesn't change through
	 * all the tests below, *head_blk is set to zero at the very end rather
	 * than log_bbnum.  In a sense, log_bbnum and zero are the same block
	 * in a circular file.
	 */
	if (first_half_cycle == last_half_cycle) {
		/*
		 * In this case we believe that the entire log should have
		 * cycle number last_half_cycle.  We need to scan backwards
		 * from the end verifying that there are no holes still
		 * containing last_half_cycle - 1.  If we find such a hole,
		 * then the start of that hole will be the new head.  The
		 * simple case looks like
		 *        x | x ... | x - 1 | x
		 * Another case that fits this picture would be
		 *        x | x + 1 | x ... | x
		 * In this case the head really is somewhere at the end of the
		 * log, as one of the latest writes at the beginning was
		 * incomplete.
		 * One more case is
		 *        x | x + 1 | x ... | x - 1 | x
		 * This is really the combination of the above two cases, and
		 * the head has to end up at the start of the x-1 hole at the
		 * end of the log.
		 *
		 * In the 256k log case, we will read from the beginning to the
		 * end of the log and search for cycle numbers equal to x-1.
		 * We don't worry about the x+1 blocks that we encounter,
		 * because we know that they cannot be the head since the log
		 * started with x.
		 */
		head_blk = log_bbnum;
		stop_on_cycle = last_half_cycle - 1;
	} else {
		/*
		 * In this case we want to find the first block with cycle
		 * number matching last_half_cycle.  We expect the log to be
		 * some variation on
		 *        x + 1 ... | x ... | x
		 * The first block with cycle number x (last_half_cycle) will
		 * be where the new head belongs.  First we do a binary search
		 * for the first occurrence of last_half_cycle.  The binary
		 * search may not be totally accurate, so then we scan back
		 * from there looking for occurrences of last_half_cycle before
		 * us.  If that backwards scan wraps around the beginning of
		 * the log, then we look for occurrences of last_half_cycle - 1
		 * at the end of the log.  The cases we're looking for look
		 * like
		 *                               v binary search stopped here
		 *        x + 1 ... | x | x + 1 | x ... | x
		 *                   ^ but we want to locate this spot
		 * or
		 *        <---------> less than scan distance
		 *        x + 1 ... | x ... | x - 1 | x
		 *                           ^ we want to locate this spot
		 */
		stop_on_cycle = last_half_cycle;
		error = xlog_find_cycle_start(log, buffer, first_blk, &head_blk,
				last_half_cycle);
		if (error)
			goto out_free_buffer;
	}

	/*
	 * Now validate the answer.  Scan back some number of maximum possible
	 * blocks and make sure each one has the expected cycle number.  The
	 * maximum is determined by the total possible amount of buffering
	 * in the in-core log.  The following number can be made tighter if
	 * we actually look at the block size of the filesystem.
	 */
	num_scan_bblks = min_t(int, log_bbnum, XLOG_TOTAL_REC_SHIFT(log));
	if (head_blk >= num_scan_bblks) {
		/*
		 * We are guaranteed that the entire check can be performed
		 * in one buffer.
		 */
		start_blk = head_blk - num_scan_bblks;
		if ((error = xlog_find_verify_cycle(log,
						start_blk, num_scan_bblks,
						stop_on_cycle, &new_blk)))
			goto out_free_buffer;
		if (new_blk != -1)
			head_blk = new_blk;
	} else {		/* need to read 2 parts of log */
		/*
		 * We are going to scan backwards in the log in two parts.
		 * First we scan the physical end of the log.  In this part
		 * of the log, we are looking for blocks with cycle number
		 * last_half_cycle - 1.
		 * If we find one, then we know that the log starts there, as
		 * we've found a hole that didn't get written in going around
		 * the end of the physical log.  The simple case for this is
		 *        x + 1 ... | x ... | x - 1 | x
		 *        <---------> less than scan distance
		 * If all of the blocks at the end of the log have cycle number
		 * last_half_cycle, then we check the blocks at the start of
		 * the log looking for occurrences of last_half_cycle.  If we
		 * find one, then our current estimate for the location of the
		 * first occurrence of last_half_cycle is wrong and we move
		 * back to the hole we've found.  This case looks like
		 *        x + 1 ... | x | x + 1 | x ...
		 *                               ^ binary search stopped here
		 * Another case we need to handle that only occurs in 256k
		 * logs is
		 *        x + 1 ... | x ... | x+1 | x ...
		 *                   ^ binary search stops here
		 * In a 256k log, the scan at the end of the log will see the
		 * x + 1 blocks.  We need to skip past those since that is
		 * certainly not the head of the log.  By searching for
		 * last_half_cycle-1 we accomplish that.
		 */
		ASSERT(head_blk <= INT_MAX &&
			(xfs_daddr_t) num_scan_bblks >= head_blk);
		start_blk = log_bbnum - (num_scan_bblks - head_blk);
		if ((error = xlog_find_verify_cycle(log, start_blk,
					num_scan_bblks - (int)head_blk,
					(stop_on_cycle - 1), &new_blk)))
			goto out_free_buffer;
		if (new_blk != -1) {
			head_blk = new_blk;
			goto validate_head;
		}

		/*
		 * Scan beginning of log now.  The last part of the physical
		 * log is good.  This scan needs to verify that it doesn't find
		 * the last_half_cycle.
		 */
		start_blk = 0;
		ASSERT(head_blk <= INT_MAX);
		if ((error = xlog_find_verify_cycle(log,
					start_blk, (int)head_blk,
					stop_on_cycle, &new_blk)))
			goto out_free_buffer;
		if (new_blk != -1)
			head_blk = new_blk;
	}

validate_head:
	/*
	 * Now we need to make sure head_blk is not pointing to a block in
	 * the middle of a log record.
	 */
	num_scan_bblks = XLOG_REC_SHIFT(log);
	if (head_blk >= num_scan_bblks) {
		start_blk = head_blk - num_scan_bblks; /* don't read head_blk */

		/* start ptr at last block ptr before head_blk */
		error = xlog_find_verify_log_record(log, start_blk, &head_blk, 0);
		if (error == 1)
			error = -EIO;
		if (error)
			goto out_free_buffer;
	} else {
		start_blk = 0;
		ASSERT(head_blk <= INT_MAX);
		error = xlog_find_verify_log_record(log, start_blk, &head_blk, 0);
		if (error < 0)
			goto out_free_buffer;
		if (error == 1) {
			/* We hit the beginning of the log during our search */
			start_blk = log_bbnum - (num_scan_bblks - head_blk);
			new_blk = log_bbnum;
			ASSERT(start_blk <= INT_MAX &&
				(xfs_daddr_t) log_bbnum-start_blk >= 0);
			ASSERT(head_blk <= INT_MAX);
			error = xlog_find_verify_log_record(log, start_blk,
							&new_blk, (int)head_blk);
			if (error == 1)
				error = -EIO;
			if (error)
				goto out_free_buffer;
			if (new_blk != log_bbnum)
				head_blk = new_blk;
		} else if (error)
			goto out_free_buffer;
	}

	kmem_free(buffer);
	if (head_blk == log_bbnum)
		*return_head_blk = 0;
	else
		*return_head_blk = head_blk;
	/*
	 * When returning here, we have a good block number.  A bad block
	 * would mean that during a previous crash, we didn't have a clean
	 * break from cycle number N to cycle number N-1.  In this case, we
	 * need to find the first block with cycle number N-1.
	 */
	return 0;

out_free_buffer:
	kmem_free(buffer);
	if (error)
		xfs_warn(log->l_mp, "failed to find log head");
	return error;
}

/*
 * Seek backwards in the log for log record headers.
 *
 * Given a starting log block, walk backwards until we find the provided number
 * of records or hit the provided tail block. The return value is the number of
 * records encountered or a negative error code. The log block and buffer
 * pointer of the last record seen are returned in rblk and rhead respectively.
 */
STATIC int
xlog_rseek_logrec_hdr(
	struct xlog		*log,
	xfs_daddr_t		head_blk,
	xfs_daddr_t		tail_blk,
	int			count,
	char			*buffer,
	xfs_daddr_t		*rblk,
	struct xlog_rec_header	**rhead,
	bool			*wrapped)
{
	int			i;
	int			error;
	int			found = 0;
	char			*offset = NULL;
	xfs_daddr_t		end_blk;

	*wrapped = false;

	/*
	 * Walk backwards from the head block until we hit the tail or the first
	 * block in the log.
	 */
	end_blk = head_blk > tail_blk ? tail_blk : 0;
	for (i = (int) head_blk - 1; i >= end_blk; i--) {
		error = xlog_bread(log, i, 1, buffer, &offset);
		if (error)
			goto out_error;

		if (*(__be32 *) offset == cpu_to_be32(XLOG_HEADER_MAGIC_NUM)) {
			*rblk = i;
			*rhead = (struct xlog_rec_header *) offset;
			if (++found == count)
				break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  802) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  803) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  804) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  805) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  806) 	 * If we haven't hit the tail block or found the requested number of log
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  807) 	 * record headers, start looking again from the end of the physical log.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  808) 	 * Note that callers can pass head == tail if the tail is not yet known.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  809) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  810) 	if (tail_blk >= head_blk && found != count) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  811) 		for (i = log->l_logBBsize - 1; i >= (int) tail_blk; i--) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  812) 			error = xlog_bread(log, i, 1, buffer, &offset);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  813) 			if (error)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  814) 				goto out_error;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  815) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  816) 			if (*(__be32 *)offset ==
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  817) 			    cpu_to_be32(XLOG_HEADER_MAGIC_NUM)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  818) 				*wrapped = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  819) 				*rblk = i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  820) 				*rhead = (struct xlog_rec_header *) offset;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  821) 				if (++found == count)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  822) 					break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  823) 			}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  824) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  825) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  826) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  827) 	return found;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  828) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  829) out_error:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  830) 	return error;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  831) }
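/*
 * Illustrative sketch, not part of the original file: a minimal userspace
 * model of the two-pass reverse scan above. Walk from head_blk - 1 down to
 * the tail (or to block 0 if the range wraps), then, if needed, continue
 * from the physical end of the log back down to the tail. is_hdr[] stands
 * in for the magic-number test and all demo_* names are hypothetical.
 */
#include <stdbool.h>

static int
demo_rseek_hdr(
	const bool	*is_hdr,	/* one flag per basic block */
	int		log_size,
	int		head_blk,
	int		tail_blk,
	bool		*wrapped)
{
	int		i;

	*wrapped = false;

	/* pass 1: head_blk - 1 down to the tail (or 0 when wrapping) */
	for (i = head_blk - 1; i >= (head_blk > tail_blk ? tail_blk : 0); i--)
		if (is_hdr[i])
			return i;

	/* pass 2: physical end of the log down to the tail */
	if (tail_blk >= head_blk) {
		for (i = log_size - 1; i >= tail_blk; i--) {
			if (is_hdr[i]) {
				*wrapped = true;
				return i;
			}
		}
	}

	return -1;			/* no record header found */
}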
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  832) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  833) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  834)  * Seek forward in the log for log record headers.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  835)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  836)  * Given head and tail blocks, walk forward from the tail block until we find
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  837)  * the provided number of records or hit the head block. The return value is the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  838)  * number of records encountered or a negative error code. The log block and
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  839)  * buffer pointer of the last record seen are returned in rblk and rhead
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  840)  * respectively.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  841)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  842) STATIC int
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  843) xlog_seek_logrec_hdr(
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  844) 	struct xlog		*log,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  845) 	xfs_daddr_t		head_blk,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  846) 	xfs_daddr_t		tail_blk,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  847) 	int			count,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  848) 	char			*buffer,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  849) 	xfs_daddr_t		*rblk,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  850) 	struct xlog_rec_header	**rhead,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  851) 	bool			*wrapped)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  852) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  853) 	int			i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  854) 	int			error;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  855) 	int			found = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  856) 	char			*offset = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  857) 	xfs_daddr_t		end_blk;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  858) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  859) 	*wrapped = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  860) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  861) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  862) 	 * Walk forward from the tail block until we hit the head or the last
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  863) 	 * block in the log.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  864) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  865) 	end_blk = head_blk > tail_blk ? head_blk : log->l_logBBsize - 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  866) 	for (i = (int) tail_blk; i <= end_blk; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  867) 		error = xlog_bread(log, i, 1, buffer, &offset);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  868) 		if (error)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  869) 			goto out_error;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  870) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  871) 		if (*(__be32 *) offset == cpu_to_be32(XLOG_HEADER_MAGIC_NUM)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  872) 			*rblk = i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  873) 			*rhead = (struct xlog_rec_header *) offset;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  874) 			if (++found == count)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  875) 				break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  876) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  877) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  878) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  879) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  880) 	 * If we haven't hit the head block or found the requested number of log
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  881) 	 * record headers, start looking again from the start of the physical log.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  882) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  883) 	if (tail_blk > head_blk && found != count) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  884) 		for (i = 0; i < (int) head_blk; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  885) 			error = xlog_bread(log, i, 1, buffer, &offset);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  886) 			if (error)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  887) 				goto out_error;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  888) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  889) 			if (*(__be32 *)offset ==
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  890) 			    cpu_to_be32(XLOG_HEADER_MAGIC_NUM)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  891) 				*wrapped = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  892) 				*rblk = i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  893) 				*rhead = (struct xlog_rec_header *) offset;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  894) 				if (++found == count)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  895) 					break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  896) 			}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  897) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  898) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  899) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  900) 	return found;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  901) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  902) out_error:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  903) 	return error;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  904) }
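/*
 * Illustrative sketch, not part of the original file: the forward variant
 * mirrors the reverse scan, only the range selection differs. When
 * head_blk > tail_blk the log is linear and a single pass over
 * [tail_blk, head_blk] suffices; when tail_blk > head_blk the active range
 * wraps, so pass one covers [tail_blk, log_size - 1] and pass two covers
 * [0, head_blk). A hypothetical helper computing just those bounds:
 */
static void
demo_seek_ranges(
	int	log_size,
	int	head_blk,
	int	tail_blk,
	int	*end1,		/* last block of pass one */
	int	*start2,	/* first block of pass two, or -1 */
	int	*end2)		/* last block of pass two, or -1 */
{
	if (head_blk > tail_blk) {
		*end1 = head_blk;		/* linear: one pass */
		*start2 = *end2 = -1;		/* second pass unused */
	} else {
		*end1 = log_size - 1;		/* wrapped: two passes */
		*start2 = 0;
		*end2 = head_blk - 1;
	}
}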
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  905) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  906) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  907)  * Calculate distance from head to tail (i.e., unused space in the log).
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  908)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  909) static inline int
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  910) xlog_tail_distance(
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  911) 	struct xlog	*log,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  912) 	xfs_daddr_t	head_blk,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  913) 	xfs_daddr_t	tail_blk)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  914) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  915) 	if (head_blk < tail_blk)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  916) 		return tail_blk - head_blk;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  917) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  918) 	return tail_blk + (log->l_logBBsize - head_blk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  919) }
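/*
 * Worked example for the arithmetic above (editorial, with assumed
 * numbers): in a 1000-block log with head_blk = 900 and tail_blk = 100
 * the head has wrapped past the tail, so the unused space is the 100
 * blocks before the tail plus the 100 blocks from the head to the
 * physical end:
 *
 *	tail_blk + (l_logBBsize - head_blk) = 100 + (1000 - 900) = 200
 *
 * With head_blk = 100 and tail_blk = 900 the log is linear and the
 * distance is simply 900 - 100 = 800 blocks.
 */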
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  920) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  921) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  922)  * Verify the log tail. This is particularly important when torn or incomplete
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  923)  * writes have been detected near the front of the log and the head has been
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  924)  * walked back accordingly.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  925)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  926)  * We also have to handle the case where the tail was pinned and the head
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  927)  * blocked behind the tail right before a crash. If the tail had been pushed
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  928)  * immediately prior to the crash and the subsequent checkpoint was only
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  929)  * partially written, it's possible it overwrote the last referenced tail in the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  930)  * log with garbage. This is not a coherency problem because the tail must have
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  931)  * been pushed before it can be overwritten, but appears as log corruption to
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  932)  * recovery because we have no way to know the tail was updated if the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  933)  * subsequent checkpoint didn't write successfully.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  934)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  935)  * Therefore, CRC check the log from tail to head. If a failure occurs and the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  936)  * offending record is within max iclog bufs from the head, walk the tail
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  937)  * forward and retry until a valid tail is found or corruption is detected out
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  938)  * of the range of a possible overwrite.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  939)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  940) STATIC int
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  941) xlog_verify_tail(
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  942) 	struct xlog		*log,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  943) 	xfs_daddr_t		head_blk,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  944) 	xfs_daddr_t		*tail_blk,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  945) 	int			hsize)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  946) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  947) 	struct xlog_rec_header	*thead;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  948) 	char			*buffer;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  949) 	xfs_daddr_t		first_bad;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  950) 	int			error = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  951) 	bool			wrapped;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  952) 	xfs_daddr_t		tmp_tail;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  953) 	xfs_daddr_t		orig_tail = *tail_blk;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  954) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  955) 	buffer = xlog_alloc_buffer(log, 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  956) 	if (!buffer)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  957) 		return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  958) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  959) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  960) 	 * Make sure the tail points to a record (returns positive count on
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  961) 	 * success).
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  962) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  963) 	error = xlog_seek_logrec_hdr(log, head_blk, *tail_blk, 1, buffer,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  964) 			&tmp_tail, &thead, &wrapped);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  965) 	if (error < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  966) 		goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  967) 	if (*tail_blk != tmp_tail)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  968) 		*tail_blk = tmp_tail;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  969) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  970) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  971) 	 * Run a CRC check from the tail to the head. We can't just check
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  972) 	 * MAX_ICLOGS records past the tail because the tail may point to stale
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  973) 	 * blocks cleared during the search for the head/tail. These blocks are
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  974) 	 * overwritten with zero-length records and thus record count is not a
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  975) 	 * reliable indicator of the iclog state before a crash.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  976) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  977) 	first_bad = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  978) 	error = xlog_do_recovery_pass(log, head_blk, *tail_blk,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  979) 				      XLOG_RECOVER_CRCPASS, &first_bad);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  980) 	while ((error == -EFSBADCRC || error == -EFSCORRUPTED) && first_bad) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  981) 		int	tail_distance;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  982) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  983) 		/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  984) 		 * Is corruption within range of the head? If so, retry from
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  985) 		 * the next record. Otherwise return an error.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  986) 		 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  987) 		tail_distance = xlog_tail_distance(log, head_blk, first_bad);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  988) 		if (tail_distance > BTOBB(XLOG_MAX_ICLOGS * hsize))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  989) 			break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  990) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  991) 		/* skip to the next record; returns positive count on success */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  992) 		error = xlog_seek_logrec_hdr(log, head_blk, first_bad, 2,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  993) 				buffer, &tmp_tail, &thead, &wrapped);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  994) 		if (error < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  995) 			goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  996) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  997) 		*tail_blk = tmp_tail;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  998) 		first_bad = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  999) 		error = xlog_do_recovery_pass(log, head_blk, *tail_blk,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1000) 					      XLOG_RECOVER_CRCPASS, &first_bad);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1001) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1002) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1003) 	if (!error && *tail_blk != orig_tail)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1004) 		xfs_warn(log->l_mp,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1005) 		"Tail block (0x%llx) overwrite detected. Updated to 0x%llx",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1006) 			 orig_tail, *tail_blk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1007) out:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1008) 	kmem_free(buffer);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1009) 	return error;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1010) }
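/*
 * Illustrative sketch, not part of the original file: the retry loop above
 * reduced to its policy. Keep moving the tail forward past a bad record
 * while the CRC failure sits within the window a partially written
 * checkpoint could have overwritten, and give up as soon as a failure
 * falls outside it. The callbacks and demo_* names are hypothetical
 * stand-ins for xlog_do_recovery_pass() and xlog_seek_logrec_hdr().
 */
static int
demo_verify_tail(
	int	log_size,
	int	head_blk,
	int	*tail_blk,
	int	window_bblks,	/* cf. BTOBB(XLOG_MAX_ICLOGS * hsize) */
	int	(*crc_pass)(int head, int tail, int *first_bad),
	int	(*next_rec)(int after))
{
	int	first_bad, dist, error;

	error = crc_pass(head_blk, *tail_blk, &first_bad);
	while (error && first_bad) {
		/* distance from the head to the failing record */
		if (head_blk < first_bad)
			dist = first_bad - head_blk;
		else
			dist = first_bad + (log_size - head_blk);
		if (dist > window_bblks)
			break;		/* outside the window: corruption */

		/* skip the tail to the record after the bad one, retry */
		*tail_blk = next_rec(first_bad);
		error = crc_pass(head_blk, *tail_blk, &first_bad);
	}
	return error;
}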
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1011) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1012) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1013)  * Detect and trim torn writes from the head of the log.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1014)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1015)  * Storage without sector atomicity guarantees can result in torn writes in the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1016)  * log in the event of a crash. Our only means to detect this scenario is via
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1017)  * CRC verification. While we can't always be certain that CRC verification
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1018)  * failure is due to a torn write vs. an unrelated corruption, we do know that
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1019)  * only a certain number (XLOG_MAX_ICLOGS) of log records can be written out at
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1020)  * one time. Therefore, CRC verify up to XLOG_MAX_ICLOGS records at the head of
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1021)  * the log and treat failures in this range as torn writes as a matter of
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1022)  * policy. In the event of CRC failure, the head is walked back to the last good
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1023)  * record in the log and the tail is updated from that record and verified.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1024)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1025) STATIC int
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1026) xlog_verify_head(
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1027) 	struct xlog		*log,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1028) 	xfs_daddr_t		*head_blk,	/* in/out: unverified head */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1029) 	xfs_daddr_t		*tail_blk,	/* out: tail block */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1030) 	char			*buffer,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1031) 	xfs_daddr_t		*rhead_blk,	/* start blk of last record */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1032) 	struct xlog_rec_header	**rhead,	/* ptr to last record */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1033) 	bool			*wrapped)	/* last rec. wraps phys. log */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1034) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1035) 	struct xlog_rec_header	*tmp_rhead;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1036) 	char			*tmp_buffer;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1037) 	xfs_daddr_t		first_bad;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1038) 	xfs_daddr_t		tmp_rhead_blk;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1039) 	int			found;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1040) 	int			error;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1041) 	bool			tmp_wrapped;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1042) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1043) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1044) 	 * Check the head of the log for torn writes. Search backwards from the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1045) 	 * head until we hit the tail or the maximum number of log record I/Os
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1046) 	 * that could have been in flight at one time. Use a temporary buffer so
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1047) 	 * we don't trash the rhead/buffer pointers from the caller.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1048) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1049) 	tmp_buffer = xlog_alloc_buffer(log, 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1050) 	if (!tmp_buffer)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1051) 		return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1052) 	error = xlog_rseek_logrec_hdr(log, *head_blk, *tail_blk,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1053) 				      XLOG_MAX_ICLOGS, tmp_buffer,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1054) 				      &tmp_rhead_blk, &tmp_rhead, &tmp_wrapped);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1055) 	kmem_free(tmp_buffer);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1056) 	if (error < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1057) 		return error;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1058) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1059) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1060) 	 * Now run a CRC verification pass over the records starting at the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1061) 	 * block found above to the current head. If a CRC failure occurs, the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1062) 	 * log block of the first bad record is saved in first_bad.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1063) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1064) 	error = xlog_do_recovery_pass(log, *head_blk, tmp_rhead_blk,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1065) 				      XLOG_RECOVER_CRCPASS, &first_bad);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1066) 	if ((error == -EFSBADCRC || error == -EFSCORRUPTED) && first_bad) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1067) 		/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1068) 		 * We've hit a potential torn write. Reset the error and warn
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1069) 		 * about it.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1070) 		 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1071) 		error = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1072) 		xfs_warn(log->l_mp,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1073) "Torn write (CRC failure) detected at log block 0x%llx. Truncating head block from 0x%llx.",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1074) 			 first_bad, *head_blk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1075) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1076) 		/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1077) 		 * Get the header block and buffer pointer for the last good
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1078) 		 * record before the bad record.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1079) 		 *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1080) 		 * Note that xlog_find_tail() clears the blocks at the new head
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1081) 		 * (i.e., the records with invalid CRC) if the cycle number
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1082) 		 * matches the current cycle.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1083) 		 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1084) 		found = xlog_rseek_logrec_hdr(log, first_bad, *tail_blk, 1,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1085) 				buffer, rhead_blk, rhead, wrapped);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1086) 		if (found < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1087) 			return found;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1088) 		if (found == 0)		/* XXX: right thing to do here? */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1089) 			return -EIO;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1090) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1091) 		/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1092) 		 * Reset the head block to the starting block of the first bad
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1093) 		 * log record and set the tail block based on the last good
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1094) 		 * record.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1095) 		 *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1096) 		 * Bail out if the updated head/tail match as this indicates
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1097) 		 * possible corruption outside of the acceptable
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1098) 		 * (XLOG_MAX_ICLOGS) range. This is a job for xfs_repair...
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1099) 		 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1100) 		*head_blk = first_bad;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1101) 		*tail_blk = BLOCK_LSN(be64_to_cpu((*rhead)->h_tail_lsn));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1102) 		if (*head_blk == *tail_blk) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1103) 			ASSERT(0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1104) 			return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1105) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1106) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1107) 	if (error)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1108) 		return error;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1109) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1110) 	return xlog_verify_tail(log, *head_blk, tail_blk,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1111) 				be32_to_cpu((*rhead)->h_size));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1112) }
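/*
 * Editorial note on the size of the torn-write window used above,
 * assuming XLOG_MAX_ICLOGS == 8 and a typical 32 KiB in-core log buffer
 * (rhead->h_size): at most 8 * 32768 bytes of records can be in flight
 * at once, i.e. BTOBB(8 * 32768) = 512 basic blocks. CRC failures within
 * that many blocks of the head are treated as torn writes; anything
 * farther back is treated as genuine corruption.
 */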
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1113) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1114) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1115)  * We need to make sure we handle log wrapping properly, so we can't use the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1116)  * calculated logbno directly. Make sure it wraps to the correct bno inside the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1117)  * log.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1118)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1119)  * The log is limited to 32 bit sizes, so we use the appropriate modulus
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1120)  * operation here and cast it back to a 64 bit daddr on return.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1121)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1122) static inline xfs_daddr_t
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1123) xlog_wrap_logbno(
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1124) 	struct xlog		*log,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1125) 	xfs_daddr_t		bno)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1126) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1127) 	int			mod;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1128) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1129) 	div_s64_rem(bno, log->l_logBBsize, &mod);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1130) 	return mod;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1131) }
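/*
 * Worked example (editorial, with assumed numbers): in a 1000-block log,
 * a computed block number of 1007 (say rhead_blk = 990 plus one header
 * block and 16 data blocks) wraps to physical block 7:
 *
 *	div_s64_rem(1007, 1000, &mod)	=>	mod == 7
 */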
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1132) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1133) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1134)  * Check whether the head of the log points to an unmount record. In other
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1135)  * words, determine whether the log is clean. If so, update the in-core state
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1136)  * appropriately.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1137)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1138) static int
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1139) xlog_check_unmount_rec(
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1140) 	struct xlog		*log,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1141) 	xfs_daddr_t		*head_blk,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1142) 	xfs_daddr_t		*tail_blk,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1143) 	struct xlog_rec_header	*rhead,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1144) 	xfs_daddr_t		rhead_blk,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1145) 	char			*buffer,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1146) 	bool			*clean)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1147) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1148) 	struct xlog_op_header	*op_head;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1149) 	xfs_daddr_t		umount_data_blk;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1150) 	xfs_daddr_t		after_umount_blk;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1151) 	int			hblks;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1152) 	int			error;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1153) 	char			*offset;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1154) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1155) 	*clean = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1156) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1157) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1158) 	 * Look for an unmount record. If we find it, then we know there was a
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1159) 	 * clean unmount. Since the block computed after the record could lie past
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1160) 	 * the last block in the physical log, wrap it before comparing to head_blk.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1161) 	 *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1162) 	 * Save the current tail lsn to use to pass to xlog_clear_stale_blocks()
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1163) 	 * below. We won't want to clear the unmount record if there is one, so
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1164) 	 * we pass the lsn of the unmount record rather than the block after it.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1165) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1166) 	hblks = xlog_logrec_hblks(log, rhead);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1167) 	after_umount_blk = xlog_wrap_logbno(log,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1168) 			rhead_blk + hblks + BTOBB(be32_to_cpu(rhead->h_len)));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1169) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1170) 	if (*head_blk == after_umount_blk &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1171) 	    be32_to_cpu(rhead->h_num_logops) == 1) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1172) 		umount_data_blk = xlog_wrap_logbno(log, rhead_blk + hblks);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1173) 		error = xlog_bread(log, umount_data_blk, 1, buffer, &offset);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1174) 		if (error)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1175) 			return error;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1176) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1177) 		op_head = (struct xlog_op_header *)offset;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1178) 		if (op_head->oh_flags & XLOG_UNMOUNT_TRANS) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1179) 			/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1180) 			 * Set tail and last sync so that newly written log
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1181) 			 * records will point recovery to after the current
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1182) 			 * unmount record.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1183) 			 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1184) 			xlog_assign_atomic_lsn(&log->l_tail_lsn,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1185) 					log->l_curr_cycle, after_umount_blk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1186) 			xlog_assign_atomic_lsn(&log->l_last_sync_lsn,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1187) 					log->l_curr_cycle, after_umount_blk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1188) 			*tail_blk = after_umount_blk;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1189) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1190) 			*clean = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1191) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1192) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1193) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1194) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1195) }
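/*
 * Illustrative sketch, not part of the original file: the "clean log"
 * test above in isolation. The head must sit immediately after a record
 * carrying exactly one log operation, and that operation must have the
 * unmount flag set. A hypothetical predicate over plain scalars:
 */
static int
demo_log_is_clean(
	int		head_blk,
	int		after_umount_blk,
	int		num_logops,
	unsigned int	oh_flags,
	unsigned int	unmount_flag)	/* cf. XLOG_UNMOUNT_TRANS */
{
	return head_blk == after_umount_blk &&
	       num_logops == 1 &&
	       (oh_flags & unmount_flag) != 0;
}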
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1196) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1197) static void
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1198) xlog_set_state(
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1199) 	struct xlog		*log,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1200) 	xfs_daddr_t		head_blk,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1201) 	struct xlog_rec_header	*rhead,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1202) 	xfs_daddr_t		rhead_blk,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1203) 	bool			bump_cycle)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1204) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1205) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1206) 	 * Reset log values according to the state of the log when we
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1207) 	 * crashed.  In the case where head_blk == 0, we bump curr_cycle
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1208) 	 * by one because the next write starts a new cycle rather than
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1209) 	 * continuing the cycle of the last good log record.  At this
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1210) 	 * point we have guaranteed that all partial log records have been
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1211) 	 * accounted for.  Therefore, we know that the last good log record
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1212) 	 * written was complete and ended exactly on the end boundary
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1213) 	 * of the physical log.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1214) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1215) 	log->l_prev_block = rhead_blk;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1216) 	log->l_curr_block = (int)head_blk;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1217) 	log->l_curr_cycle = be32_to_cpu(rhead->h_cycle);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1218) 	if (bump_cycle)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1219) 		log->l_curr_cycle++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1220) 	atomic64_set(&log->l_tail_lsn, be64_to_cpu(rhead->h_tail_lsn));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1221) 	atomic64_set(&log->l_last_sync_lsn, be64_to_cpu(rhead->h_lsn));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1222) 	xlog_assign_grant_head(&log->l_reserve_head.grant, log->l_curr_cycle,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1223) 					BBTOB(log->l_curr_block));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1224) 	xlog_assign_grant_head(&log->l_write_head.grant, log->l_curr_cycle,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1225) 					BBTOB(log->l_curr_block));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1226) }
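/*
 * Editorial example of the bump_cycle case described above: if the last
 * good record ended exactly on the physical end of the log, the head
 * wraps to block 0 and the next write begins a new cycle. So with
 * rhead->h_cycle == 5 and head_blk == 0, the in-core state becomes
 * l_curr_block = 0 and l_curr_cycle = 6; in the non-wrapped case the
 * cycle simply stays 5.
 */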
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1227) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1228) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1229)  * Find the sync block number or the tail of the log.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1230)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1231)  * This will be the block number of the last record to have its
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1232)  * associated buffers synced to disk.  Every log record header has
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1233)  * a sync lsn embedded in it.  LSNs hold block numbers, so it is easy
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1234)  * to get a sync block number.  The only concern is to figure out which
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1235)  * log record header to believe.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1236)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1237)  * The following algorithm uses the log record header with the largest
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1238)  * lsn.  The entire log record does not need to be valid.  We only care
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1239)  * that the header is valid.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1240)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1241)  * We could speed up the search by using the current head_blk buffer, but
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1242)  * it is not available.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1243)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1244) STATIC int
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1245) xlog_find_tail(
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1246) 	struct xlog		*log,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1247) 	xfs_daddr_t		*head_blk,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1248) 	xfs_daddr_t		*tail_blk)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1249) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1250) 	xlog_rec_header_t	*rhead;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1251) 	char			*offset = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1252) 	char			*buffer;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1253) 	int			error;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1254) 	xfs_daddr_t		rhead_blk;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1255) 	xfs_lsn_t		tail_lsn;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1256) 	bool			wrapped = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1257) 	bool			clean = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1258) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1259) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1260) 	 * Find previous log record
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1261) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1262) 	if ((error = xlog_find_head(log, head_blk)))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1263) 		return error;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1264) 	ASSERT(*head_blk < INT_MAX);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1265) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1266) 	buffer = xlog_alloc_buffer(log, 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1267) 	if (!buffer)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1268) 		return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1269) 	if (*head_blk == 0) {				/* special case */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1270) 		error = xlog_bread(log, 0, 1, buffer, &offset);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1271) 		if (error)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1272) 			goto done;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1273) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1274) 		if (xlog_get_cycle(offset) == 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1275) 			*tail_blk = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1276) 			/* leave all other log inited values alone */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1277) 			goto done;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1278) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1279) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1280) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1281) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1282) 	 * Search backwards through the log looking for the log record header
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1283) 	 * block. This wraps all the way back around to the head so something is
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1284) 	 * seriously wrong if we can't find it.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1285) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1286) 	error = xlog_rseek_logrec_hdr(log, *head_blk, *head_blk, 1, buffer,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1287) 				      &rhead_blk, &rhead, &wrapped);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1288) 	if (error < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1289) 		goto done;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1290) 	if (!error) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1291) 		xfs_warn(log->l_mp, "%s: couldn't find sync record", __func__);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1292) 		error = -EFSCORRUPTED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1293) 		goto done;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1294) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1295) 	*tail_blk = BLOCK_LSN(be64_to_cpu(rhead->h_tail_lsn));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1296) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1297) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1298) 	 * Set the log state based on the current head record.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1299) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1300) 	xlog_set_state(log, *head_blk, rhead, rhead_blk, wrapped);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1301) 	tail_lsn = atomic64_read(&log->l_tail_lsn);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1302) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1303) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1304) 	 * Look for an unmount record at the head of the log. This sets the log
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1305) 	 * state to determine whether recovery is necessary.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1306) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1307) 	error = xlog_check_unmount_rec(log, head_blk, tail_blk, rhead,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1308) 				       rhead_blk, buffer, &clean);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1309) 	if (error)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1310) 		goto done;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1311) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1312) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1313) 	 * Verify the log head if the log is not clean (i.e., we have anything
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1314) 	 * but an unmount record at the head). This uses CRC verification to
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1315) 	 * detect and trim torn writes. If discovered, CRC failures are
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1316) 	 * considered torn writes and the log head is trimmed accordingly.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1317) 	 *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1318) 	 * Note that we can only run CRC verification when the log is dirty
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1319) 	 * because there's no guarantee that the log data behind an unmount
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1320) 	 * record is compatible with the current architecture.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1321) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1322) 	if (!clean) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1323) 		xfs_daddr_t	orig_head = *head_blk;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1324) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1325) 		error = xlog_verify_head(log, head_blk, tail_blk, buffer,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1326) 					 &rhead_blk, &rhead, &wrapped);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1327) 		if (error)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1328) 			goto done;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1329) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1330) 		/* update in-core state again if the head changed */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1331) 		if (*head_blk != orig_head) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1332) 			xlog_set_state(log, *head_blk, rhead, rhead_blk,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1333) 				       wrapped);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1334) 			tail_lsn = atomic64_read(&log->l_tail_lsn);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1335) 			error = xlog_check_unmount_rec(log, head_blk, tail_blk,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1336) 						       rhead, rhead_blk, buffer,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1337) 						       &clean);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1338) 			if (error)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1339) 				goto done;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1340) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1341) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1342) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1343) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1344) 	 * Note that the unmount was clean. If the unmount was not clean, we
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1345) 	 * need to know this to rebuild the superblock counters from the perag
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1346) 	 * headers if we have a filesystem using non-persistent counters.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1347) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1348) 	if (clean)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1349) 		log->l_mp->m_flags |= XFS_MOUNT_WAS_CLEAN;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1350) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1351) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1352) 	 * Make sure that there are no blocks in front of the head
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1353) 	 * with the same cycle number as the head.  This can happen
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1354) 	 * because we allow multiple outstanding log writes concurrently,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1355) 	 * and the later writes might make it out before earlier ones.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1356) 	 *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1357) 	 * We use the lsn from before modifying it so that we'll never
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1358) 	 * overwrite the unmount record after a clean unmount.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1359) 	 *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1360) 	 * Do this only if we are going to recover the filesystem.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1361) 	 *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1362) 	 * NOTE: This used to say "if (!readonly)"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1363) 	 * However on Linux, we can & do recover a read-only filesystem.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1364) 	 * We only skip recovery if NORECOVERY is specified on mount,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1365) 	 * in which case we would not be here.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1366) 	 *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1367) 	 * But... if the -device- itself is readonly, just skip this.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1368) 	 * We can't recover this device anyway, so it won't matter.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1369) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1370) 	if (!xfs_readonly_buftarg(log->l_targ))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1371) 		error = xlog_clear_stale_blocks(log, tail_lsn);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1372) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1373) done:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1374) 	kmem_free(buffer);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1375) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1376) 	if (error)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1377) 		xfs_warn(log->l_mp, "failed to locate log tail");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1378) 	return error;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1379) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1380) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1381) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1382)  * Is the log zeroed at all?
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1383)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1384)  * The last binary search should be changed to perform an X block read
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1385)  * once X becomes small enough.  You can then search linearly through
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1386)  * the X blocks.  This will cut down on the number of reads we need to do.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1387)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1388)  * If the log is partially zeroed, this routine will pass back the blkno
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1389)  * of the first block with cycle number 0.  It won't have a complete LR
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1390)  * preceding it.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1391)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1392)  * Return:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1393)  *	 0 => the log is completely written to
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1394)  *	 1 => use *blk_no as the first block of the log
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1395)  *	<0 => error has occurred
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1396)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1397) STATIC int
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1398) xlog_find_zeroed(
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1399) 	struct xlog	*log,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1400) 	xfs_daddr_t	*blk_no)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1401) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1402) 	char		*buffer;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1403) 	char		*offset;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1404) 	uint	        first_cycle, last_cycle;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1405) 	xfs_daddr_t	new_blk, last_blk, start_blk;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1406) 	xfs_daddr_t     num_scan_bblks;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1407) 	int	        error, log_bbnum = log->l_logBBsize;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1408) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1409) 	*blk_no = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1410) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1411) 	/* check totally zeroed log */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1412) 	buffer = xlog_alloc_buffer(log, 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1413) 	if (!buffer)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1414) 		return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1415) 	error = xlog_bread(log, 0, 1, buffer, &offset);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1416) 	if (error)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1417) 		goto out_free_buffer;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1418) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1419) 	first_cycle = xlog_get_cycle(offset);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1420) 	if (first_cycle == 0) {		/* completely zeroed log */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1421) 		*blk_no = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1422) 		kmem_free(buffer);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1423) 		return 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1424) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1425) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1426) 	/* check partially zeroed log */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1427) 	error = xlog_bread(log, log_bbnum-1, 1, buffer, &offset);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1428) 	if (error)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1429) 		goto out_free_buffer;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1430) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1431) 	last_cycle = xlog_get_cycle(offset);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1432) 	if (last_cycle != 0) {		/* log completely written to */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1433) 		kmem_free(buffer);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1434) 		return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1435) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1436) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1437) 	/* we have a partially zeroed log */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1438) 	last_blk = log_bbnum-1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1439) 	error = xlog_find_cycle_start(log, buffer, 0, &last_blk, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1440) 	if (error)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1441) 		goto out_free_buffer;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1442) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1443) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1444) 	 * Validate the answer.  Because there is no way to guarantee that
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1445) 	 * the entire log is made up of log records which are the same size,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1446) 	 * we scan over the defined maximum blocks.  At this point, the maximum
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1447) 	 * is not chosen to mean anything special.   XXXmiken
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1448) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1449) 	num_scan_bblks = XLOG_TOTAL_REC_SHIFT(log);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1450) 	ASSERT(num_scan_bblks <= INT_MAX);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1451) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1452) 	if (last_blk < num_scan_bblks)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1453) 		num_scan_bblks = last_blk;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1454) 	start_blk = last_blk - num_scan_bblks;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1455) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1456) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1457) 	 * We search for any instances of cycle number 0 that occur before
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1458) 	 * our current estimate of the head.  What we're trying to detect is
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1459) 	 *        1 ... | 0 | 1 | 0...
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1460) 	 *                       ^ binary search ends here
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1461) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1462) 	if ((error = xlog_find_verify_cycle(log, start_blk,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1463) 					 (int)num_scan_bblks, 0, &new_blk)))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1464) 		goto out_free_buffer;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1465) 	if (new_blk != -1)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1466) 		last_blk = new_blk;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1467) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1468) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1469) 	 * Potentially backup over partial log record write.  We don't need
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1470) 	 * to search the end of the log because we know it is zero.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1471) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1472) 	error = xlog_find_verify_log_record(log, start_blk, &last_blk, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1473) 	if (error == 1)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1474) 		error = -EIO;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1475) 	if (error)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1476) 		goto out_free_buffer;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1477) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1478) 	*blk_no = last_blk;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1479) out_free_buffer:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1480) 	kmem_free(buffer);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1481) 	if (error)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1482) 		return error;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1483) 	return 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1484) }
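/*
 * Illustrative sketch, not part of the original file: how a caller
 * interprets the tri-state return contract documented above
 * (xlog_find_head() uses this routine the same way); the demo_* wrapper
 * is hypothetical:
 */
static int
demo_use_find_zeroed(
	struct xlog	*log,
	xfs_daddr_t	*head_blk)
{
	int		error = xlog_find_zeroed(log, head_blk);

	if (error < 0)
		return error;	/* I/O or allocation failure */
	if (error == 1)
		return 0;	/* partially zeroed: *head_blk is set */

	/*
	 * error == 0: the log is fully written, so the head has to be
	 * located by the cycle-number binary search instead.
	 */
	return 0;
}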
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1485) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1486) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1487)  * These are simple subroutines used by xlog_clear_stale_blocks() below
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1488)  * to initialize a buffer full of empty log record headers and write
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1489)  * them into the log.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1490)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1491) STATIC void
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1492) xlog_add_record(
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1493) 	struct xlog		*log,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1494) 	char			*buf,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1495) 	int			cycle,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1496) 	int			block,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1497) 	int			tail_cycle,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1498) 	int			tail_block)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1499) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1500) 	xlog_rec_header_t	*recp = (xlog_rec_header_t *)buf;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1501) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1502) 	memset(buf, 0, BBSIZE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1503) 	recp->h_magicno = cpu_to_be32(XLOG_HEADER_MAGIC_NUM);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1504) 	recp->h_cycle = cpu_to_be32(cycle);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1505) 	recp->h_version = cpu_to_be32(
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1506) 			xfs_sb_version_haslogv2(&log->l_mp->m_sb) ? 2 : 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1507) 	recp->h_lsn = cpu_to_be64(xlog_assign_lsn(cycle, block));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1508) 	recp->h_tail_lsn = cpu_to_be64(xlog_assign_lsn(tail_cycle, tail_block));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1509) 	recp->h_fmt = cpu_to_be32(XLOG_FMT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1510) 	memcpy(&recp->h_fs_uuid, &log->l_mp->m_sb.sb_uuid, sizeof(uuid_t));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1511) }
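
/*
 * A minimal sketch (not built) of what the stamp above produces: each
 * call fills one 512-byte basic block with nothing but a header whose
 * LSN packs the cycle into the high 32 bits and the block number into
 * the low 32 bits:
 */
#if 0	/* illustrative only -- not built */
	xfs_lsn_t	lsn = xlog_assign_lsn(7, 4096);

	/* lsn == ((xfs_lsn_t)7 << 32) | 4096 == 0x700001000 */
	ASSERT(CYCLE_LSN(lsn) == 7 && BLOCK_LSN(lsn) == 4096);
#endif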
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1512) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1513) STATIC int
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1514) xlog_write_log_records(
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1515) 	struct xlog	*log,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1516) 	int		cycle,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1517) 	int		start_block,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1518) 	int		blocks,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1519) 	int		tail_cycle,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1520) 	int		tail_block)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1521) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1522) 	char		*offset;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1523) 	char		*buffer;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1524) 	int		balign, ealign;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1525) 	int		sectbb = log->l_sectBBsize;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1526) 	int		end_block = start_block + blocks;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1527) 	int		bufblks;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1528) 	int		error = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1529) 	int		i, j = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1530) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1531) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1532) 	 * Greedily allocate a buffer big enough to handle the full
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1533) 	 * range of basic blocks to be written.  If that fails, try
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1534) 	 * a smaller size.  We need to be able to write at least a
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1535) 	 * log sector, or we're out of luck.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1536) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1537) 	bufblks = 1 << ffs(blocks);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1538) 	while (bufblks > log->l_logBBsize)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1539) 		bufblks >>= 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1540) 	while (!(buffer = xlog_alloc_buffer(log, bufblks))) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1541) 		bufblks >>= 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1542) 		if (bufblks < sectbb)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1543) 			return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1544) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1545) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1546) 	/* We may need to do a read at the start to fill in part of
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1547) 	 * the buffer in the starting sector not covered by the first
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1548) 	 * write below.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1549) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1550) 	balign = round_down(start_block, sectbb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1551) 	if (balign != start_block) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1552) 		error = xlog_bread_noalign(log, start_block, 1, buffer);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1553) 		if (error)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1554) 			goto out_free_buffer;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1555) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1556) 		j = start_block - balign;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1557) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1558) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1559) 	for (i = start_block; i < end_block; i += bufblks) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1560) 		int		bcount, endcount;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1561) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1562) 		bcount = min(bufblks, end_block - start_block);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1563) 		endcount = bcount - j;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1564) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1565) 		/* We may need to do a read at the end to fill in part of
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1566) 		 * the buffer in the final sector not covered by the write.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1567) 		 * If this is the same sector as the above read, skip it.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1568) 		 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1569) 		ealign = round_down(end_block, sectbb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1570) 		if (j == 0 && (start_block + endcount > ealign)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1571) 			error = xlog_bread_noalign(log, ealign, sectbb,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1572) 					buffer + BBTOB(ealign - start_block));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1573) 			if (error)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1574) 				break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1575) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1576) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1577) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1578) 		offset = buffer + xlog_align(log, start_block);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1579) 		for (; j < endcount; j++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1580) 			xlog_add_record(log, offset, cycle, i+j,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1581) 					tail_cycle, tail_block);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1582) 			offset += BBSIZE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1583) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1584) 		error = xlog_bwrite(log, start_block, endcount, buffer);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1585) 		if (error)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1586) 			break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1587) 		start_block += endcount;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1588) 		j = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1589) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1590) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1591) out_free_buffer:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1592) 	kmem_free(buffer);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1593) 	return error;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1594) }
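
/*
 * A worked sizing example with hypothetical numbers: blocks == 24
 * (binary 11000) gives ffs(blocks) == 4, so bufblks == 16.  The greedy
 * size is driven by the lowest set bit of "blocks" and need not cover
 * the whole range in one go; the loop above simply iterates.  With a
 * sector-aligned start_block of 96 that means two writes, while an
 * unaligned start or end triggers the extra reads above to preserve
 * the untouched parts of the edge sectors (read-modify-write):
 */
#if 0	/* illustrative only -- not built */
	int	blocks = 24;			/* 0b11000 */
	int	bufblks = 1 << ffs(blocks);	/* 1 << 4 == 16 */

	/* first pass stamps and writes blocks 96..111 (endcount 16), */
	/* second pass stamps and writes blocks 112..119 (endcount 8) */
#endif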
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1595) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1596) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1597)  * This routine is called to blow away any incomplete log writes out
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1598)  * in front of the log head.  We do this so that we won't become confused
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1599)  * if we come up, write only a little bit more, and then crash again.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1600)  * If we leave the partial log records out there, this situation could
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1601)  * cause us to think those partial writes are valid blocks since they
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1602)  * have the current cycle number.  We get rid of them by overwriting them
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1603)  * with empty log records with the old cycle number rather than the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1604)  * current one.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1605)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1606)  * The tail lsn is passed in rather than taken from
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1607)  * the log so that we will not write over the unmount record after a
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1608)  * clean unmount in a 512-block log.  Doing so would leave the log without
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1609)  * any valid log records in it until a new one was written.  If we crashed
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1610)  * during that time we would not be able to recover.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1611)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1612) STATIC int
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1613) xlog_clear_stale_blocks(
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1614) 	struct xlog	*log,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1615) 	xfs_lsn_t	tail_lsn)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1616) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1617) 	int		tail_cycle, head_cycle;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1618) 	int		tail_block, head_block;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1619) 	int		tail_distance, max_distance;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1620) 	int		distance;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1621) 	int		error;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1622) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1623) 	tail_cycle = CYCLE_LSN(tail_lsn);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1624) 	tail_block = BLOCK_LSN(tail_lsn);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1625) 	head_cycle = log->l_curr_cycle;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1626) 	head_block = log->l_curr_block;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1627) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1628) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1629) 	 * Figure out the distance between the new head of the log
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1630) 	 * and the tail.  We want to write over any blocks beyond the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1631) 	 * head that we may have written just before the crash, but
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1632) 	 * we don't want to overwrite the tail of the log.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1633) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1634) 	if (head_cycle == tail_cycle) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1635) 		/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1636) 		 * The tail is behind the head in the physical log,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1637) 		 * so the distance from the head to the tail is the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1638) 		 * distance from the head to the end of the log plus
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1639) 		 * the distance from the beginning of the log to the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1640) 		 * tail.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1641) 		 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1642) 		if (XFS_IS_CORRUPT(log->l_mp,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1643) 				   head_block < tail_block ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1644) 				   head_block >= log->l_logBBsize))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1645) 			return -EFSCORRUPTED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1646) 		tail_distance = tail_block + (log->l_logBBsize - head_block);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1647) 	} else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1648) 		/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1649) 		 * The head is behind the tail in the physical log,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1650) 		 * so the distance from the head to the tail is just
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1651) 		 * the tail block minus the head block.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1652) 		 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1653) 		if (XFS_IS_CORRUPT(log->l_mp,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1654) 				   head_block >= tail_block ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1655) 				   head_cycle != tail_cycle + 1))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1656) 			return -EFSCORRUPTED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1657) 		tail_distance = tail_block - head_block;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1658) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1659) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1660) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1661) 	 * If the head is right up against the tail, we can't clear
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1662) 	 * anything.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1663) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1664) 	if (tail_distance <= 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1665) 		ASSERT(tail_distance == 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1666) 		return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1667) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1668) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1669) 	max_distance = XLOG_TOTAL_REC_SHIFT(log);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1670) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1671) 	 * Take the smaller of the maximum amount of outstanding I/O
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1672) 	 * we could have and the distance to the tail to clear out.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1673) 	 * We take the smaller so that we don't overwrite the tail and
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1674) 	 * we don't waste all day writing from the head to the tail
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1675) 	 * for no reason.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1676) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1677) 	max_distance = min(max_distance, tail_distance);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1678) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1679) 	if ((head_block + max_distance) <= log->l_logBBsize) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1680) 		/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1681) 		 * We can stomp all the blocks we need to without
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1682) 		 * wrapping around the end of the log.  Just do it
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1683) 		 * in a single write.  Use the cycle number of the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1684) 		 * current cycle minus one so that the log will look like:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1685) 		 *     n ... | n - 1 ...
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1686) 		 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1687) 		error = xlog_write_log_records(log, (head_cycle - 1),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1688) 				head_block, max_distance, tail_cycle,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1689) 				tail_block);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1690) 		if (error)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1691) 			return error;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1692) 	} else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1693) 		/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1694) 		 * We need to wrap around the end of the physical log in
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1695) 		 * order to clear all the blocks.  Do it in two separate
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1696) 		 * I/Os.  The first write should be from the head to the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1697) 		 * end of the physical log, and it should use the current
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1698) 		 * cycle number minus one just like above.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1699) 		 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1700) 		distance = log->l_logBBsize - head_block;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1701) 		error = xlog_write_log_records(log, (head_cycle - 1),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1702) 				head_block, distance, tail_cycle,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1703) 				tail_block);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1704) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1705) 		if (error)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1706) 			return error;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1707) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1708) 		/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1709) 		 * Now write the blocks at the start of the physical log.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1710) 		 * This writes the remainder of the blocks we want to clear.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1711) 		 * It uses the current cycle number since we're now on the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1712) 		 * same cycle as the head so that we get:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1713) 		 *    n ... n ... | n - 1 ...
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1714) 		 *    ^^^^^ blocks we're writing
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1715) 		 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1716) 		distance = max_distance - (log->l_logBBsize - head_block);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1717) 		error = xlog_write_log_records(log, head_cycle, 0, distance,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1718) 				tail_cycle, tail_block);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1719) 		if (error)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1720) 			return error;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1721) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1722) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1723) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1724) }
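
/*
 * A worked example with hypothetical numbers, assuming a 1000 block
 * log: with head and tail in the same cycle n, the head at block 900
 * and the tail at block 100, tail_distance is 100 + (1000 - 900) ==
 * 200.  If max_distance stays at 200, head_block + max_distance
 * overshoots the end of the log, so the second branch above issues two
 * writes: blocks 900..999 stamped with cycle n - 1, then blocks 0..99
 * stamped with cycle n.
 */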
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1725) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1726) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1727)  * Release the recovered intent item in the AIL that matches the given intent
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1728)  * type and intent id.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1729)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1730) void
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1731) xlog_recover_release_intent(
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1732) 	struct xlog		*log,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1733) 	unsigned short		intent_type,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1734) 	uint64_t		intent_id)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1735) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1736) 	struct xfs_ail_cursor	cur;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1737) 	struct xfs_log_item	*lip;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1738) 	struct xfs_ail		*ailp = log->l_ailp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1739) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1740) 	spin_lock(&ailp->ail_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1741) 	for (lip = xfs_trans_ail_cursor_first(ailp, &cur, 0); lip != NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1742) 	     lip = xfs_trans_ail_cursor_next(ailp, &cur)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1743) 		if (lip->li_type != intent_type)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1744) 			continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1745) 		if (!lip->li_ops->iop_match(lip, intent_id))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1746) 			continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1747) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1748) 		spin_unlock(&ailp->ail_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1749) 		lip->li_ops->iop_release(lip);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1750) 		spin_lock(&ailp->ail_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1751) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1752) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1753) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1754) 	xfs_trans_ail_cursor_done(&cur);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1755) 	spin_unlock(&ailp->ail_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1756) }
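
/*
 * ail_lock is dropped around ->iop_release above because the callback
 * typically removes the intent from the AIL itself, retaking the lock;
 * the loop then breaks rather than continuing the walk.  A sketch of
 * how an intent-done handler is expected to call this (the EFD field
 * name below is recalled from the EFD log format; treat this as
 * illustrative only):
 */
#if 0	/* illustrative only -- not built */
	xlog_recover_release_intent(log, XFS_LI_EFI,
			efd_formatp->efd_efi_id);
#endif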
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1757) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1758) /******************************************************************************
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1759)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1760)  *		Log recover routines
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1761)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1762)  ******************************************************************************
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1763)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1764) static const struct xlog_recover_item_ops *xlog_recover_item_ops[] = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1765) 	&xlog_buf_item_ops,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1766) 	&xlog_inode_item_ops,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1767) 	&xlog_dquot_item_ops,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1768) 	&xlog_quotaoff_item_ops,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1769) 	&xlog_icreate_item_ops,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1770) 	&xlog_efi_item_ops,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1771) 	&xlog_efd_item_ops,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1772) 	&xlog_rui_item_ops,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1773) 	&xlog_rud_item_ops,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1774) 	&xlog_cui_item_ops,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1775) 	&xlog_cud_item_ops,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1776) 	&xlog_bui_item_ops,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1777) 	&xlog_bud_item_ops,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1778) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1779) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1780) static const struct xlog_recover_item_ops *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1781) xlog_find_item_ops(
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1782) 	struct xlog_recover_item		*item)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1783) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1784) 	unsigned int				i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1785) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1786) 	for (i = 0; i < ARRAY_SIZE(xlog_recover_item_ops); i++)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1787) 		if (ITEM_TYPE(item) == xlog_recover_item_ops[i]->item_type)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1788) 			return xlog_recover_item_ops[i];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1789) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1790) 	return NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1791) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1792) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1793) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1794)  * Sort the log items in the transaction.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1795)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1796)  * The ordering constraints are defined by the inode allocation and unlink
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1797)  * behaviour. The rules are:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1798)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1799)  *	1. Every item is only logged once in a given transaction. Hence it
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1800)  *	   represents the last logged state of the item. Hence ordering is
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1801)  *	   dependent on the order in which operations need to be performed so
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1802)  *	   required initial conditions are always met.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1803)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1804)  *	2. Cancelled buffers are recorded in pass 1 in a separate table and
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1805)  *	   there's nothing to replay from them so we can simply cull them
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1806)  *	   from the transaction. However, we can't do that until after we've
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1807)  *	   replayed all the other items because they may be dependent on the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1808)  *	   cancelled buffer and replaying the cancelled buffer can remove it
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1809)  *	   from the cancelled buffer table. Hence they have to be done last.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1810)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1811)  *	3. Inode allocation buffers must be replayed before inode items that
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1812)  *	   read the buffer and replay changes into it. For filesystems using
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1813)  *	   ICREATE transactions, this means XFS_LI_ICREATE objects need to get
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1814)  *	   treated the same as inode allocation buffers as they create and
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1815)  *	   initialise the buffers directly.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1816)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1817)  *	4. Inode unlink buffers must be replayed after inode items are replayed.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1818)  *	   This ensures that inodes are completely flushed to the inode buffer
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1819)  *	   in a "free" state before we remove the unlinked inode list pointer.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1820)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1821)  * Hence the ordering needs to be inode allocation buffers first, inode items
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1822)  * second, inode unlink buffers third and cancelled buffers last.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1823)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1824)  * But there's a problem with that - we can't tell an inode allocation buffer
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1825)  * apart from a regular buffer, so we can't separate them. We can, however,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1826)  * tell an inode unlink buffer from the others, and so we can separate them out
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1827)  * from all the other buffers and move them to last.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1828)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1829)  * Hence, 4 lists, in order from head to tail:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1830)  *	- buffer_list for all buffers except cancelled/inode unlink buffers
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1831)  *	- item_list for all non-buffer items
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1832)  *	- inode_buffer_list for inode unlink buffers
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1833)  *	- cancel_list for the cancelled buffers
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1834)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1835)  * Note that we add objects to the tail of the lists so that first-to-last
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1836)  * ordering is preserved within the lists. Adding objects to the head of the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1837)  * list means when we traverse from the head we walk them in last-to-first
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1838)  * order. For cancelled buffers and inode unlink buffers this doesn't matter,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1839)  * but for all other items there may be specific ordering that we need to
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1840)  * preserve.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1841)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1842) STATIC int
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1843) xlog_recover_reorder_trans(
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1844) 	struct xlog		*log,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1845) 	struct xlog_recover	*trans,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1846) 	int			pass)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1847) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1848) 	struct xlog_recover_item *item, *n;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1849) 	int			error = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1850) 	LIST_HEAD(sort_list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1851) 	LIST_HEAD(cancel_list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1852) 	LIST_HEAD(buffer_list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1853) 	LIST_HEAD(inode_buffer_list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1854) 	LIST_HEAD(item_list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1855) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1856) 	list_splice_init(&trans->r_itemq, &sort_list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1857) 	list_for_each_entry_safe(item, n, &sort_list, ri_list) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1858) 		enum xlog_recover_reorder	fate = XLOG_REORDER_ITEM_LIST;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1859) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1860) 		item->ri_ops = xlog_find_item_ops(item);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1861) 		if (!item->ri_ops) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1862) 			xfs_warn(log->l_mp,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1863) 				"%s: unrecognized type of log operation (%d)",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1864) 				__func__, ITEM_TYPE(item));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1865) 			ASSERT(0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1866) 			/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1867) 			 * return the remaining items to the transaction item
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1868) 			 * list so they can be freed by the caller.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1869) 			 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1870) 			if (!list_empty(&sort_list))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1871) 				list_splice_init(&sort_list, &trans->r_itemq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1872) 			error = -EFSCORRUPTED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1873) 			break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1874) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1875) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1876) 		if (item->ri_ops->reorder)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1877) 			fate = item->ri_ops->reorder(item);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1878) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1879) 		switch (fate) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1880) 		case XLOG_REORDER_BUFFER_LIST:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1881) 			list_move_tail(&item->ri_list, &buffer_list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1882) 			break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1883) 		case XLOG_REORDER_CANCEL_LIST:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1884) 			trace_xfs_log_recover_item_reorder_head(log,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1885) 					trans, item, pass);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1886) 			list_move(&item->ri_list, &cancel_list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1887) 			break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1888) 		case XLOG_REORDER_INODE_BUFFER_LIST:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1889) 			list_move(&item->ri_list, &inode_buffer_list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1890) 			break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1891) 		case XLOG_REORDER_ITEM_LIST:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1892) 			trace_xfs_log_recover_item_reorder_tail(log,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1893) 							trans, item, pass);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1894) 			list_move_tail(&item->ri_list, &item_list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1895) 			break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1896) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1897) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1898) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1899) 	ASSERT(list_empty(&sort_list));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1900) 	if (!list_empty(&buffer_list))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1901) 		list_splice(&buffer_list, &trans->r_itemq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1902) 	if (!list_empty(&item_list))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1903) 		list_splice_tail(&item_list, &trans->r_itemq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1904) 	if (!list_empty(&inode_buffer_list))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1905) 		list_splice_tail(&inode_buffer_list, &trans->r_itemq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1906) 	if (!list_empty(&cancel_list))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1907) 		list_splice_tail(&cancel_list, &trans->r_itemq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1908) 	return error;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1909) }
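
/*
 * A worked example of the splices above: if the items arrive on
 * r_itemq as [buf A, inode X, cancelled buf C, unlink buf U, buf B],
 * the rebuilt queue is
 *
 *	A, B (buffer_list) -> X (item_list) -> U (inode_buffer_list)
 *	-> C (cancel_list)
 *
 * i.e. the "allocation buffers first ... cancelled buffers last"
 * ordering described in the comment above.
 */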
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1910) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1911) void
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1912) xlog_buf_readahead(
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1913) 	struct xlog		*log,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1914) 	xfs_daddr_t		blkno,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1915) 	uint			len,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1916) 	const struct xfs_buf_ops *ops)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1917) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1918) 	if (!xlog_is_buffer_cancelled(log, blkno, len))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1919) 		xfs_buf_readahead(log->l_mp->m_ddev_targp, blkno, len, ops);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1920) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1921) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1922) STATIC int
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1923) xlog_recover_items_pass2(
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1924) 	struct xlog                     *log,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1925) 	struct xlog_recover             *trans,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1926) 	struct list_head                *buffer_list,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1927) 	struct list_head                *item_list)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1928) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1929) 	struct xlog_recover_item	*item;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1930) 	int				error = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1931) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1932) 	list_for_each_entry(item, item_list, ri_list) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1933) 		trace_xfs_log_recover_item_recover(log, trans, item,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1934) 				XLOG_RECOVER_PASS2);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1935) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1936) 		if (item->ri_ops->commit_pass2)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1937) 			error = item->ri_ops->commit_pass2(log, buffer_list,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1938) 					item, trans->r_lsn);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1939) 		if (error)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1940) 			return error;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1941) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1942) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1943) 	return error;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1944) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1945) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1946) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1947)  * Perform the transaction.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1948)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1949)  * If the transaction modifies a buffer or inode, do it now.  Otherwise,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1950)  * EFIs and EFDs get queued up by adding entries into the AIL for them.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1951)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1952) STATIC int
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1953) xlog_recover_commit_trans(
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1954) 	struct xlog		*log,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1955) 	struct xlog_recover	*trans,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1956) 	int			pass,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1957) 	struct list_head	*buffer_list)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1958) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1959) 	int				error = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1960) 	int				items_queued = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1961) 	struct xlog_recover_item	*item;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1962) 	struct xlog_recover_item	*next;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1963) 	LIST_HEAD			(ra_list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1964) 	LIST_HEAD			(done_list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1965) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1966) 	#define XLOG_RECOVER_COMMIT_QUEUE_MAX 100
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1967) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1968) 	hlist_del_init(&trans->r_list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1969) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1970) 	error = xlog_recover_reorder_trans(log, trans, pass);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1971) 	if (error)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1972) 		return error;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1973) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1974) 	list_for_each_entry_safe(item, next, &trans->r_itemq, ri_list) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1975) 		trace_xfs_log_recover_item_recover(log, trans, item, pass);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1976) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1977) 		switch (pass) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1978) 		case XLOG_RECOVER_PASS1:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1979) 			if (item->ri_ops->commit_pass1)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1980) 				error = item->ri_ops->commit_pass1(log, item);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1981) 			break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1982) 		case XLOG_RECOVER_PASS2:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1983) 			if (item->ri_ops->ra_pass2)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1984) 				item->ri_ops->ra_pass2(log, item);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1985) 			list_move_tail(&item->ri_list, &ra_list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1986) 			items_queued++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1987) 			if (items_queued >= XLOG_RECOVER_COMMIT_QUEUE_MAX) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1988) 				error = xlog_recover_items_pass2(log, trans,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1989) 						buffer_list, &ra_list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1990) 				list_splice_tail_init(&ra_list, &done_list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1991) 				items_queued = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1992) 			}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1993) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1994) 			break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1995) 		default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1996) 			ASSERT(0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1997) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1998) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1999) 		if (error)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2000) 			goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2001) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2002) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2003) out:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2004) 	if (!list_empty(&ra_list)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2005) 		if (!error)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2006) 			error = xlog_recover_items_pass2(log, trans,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2007) 					buffer_list, &ra_list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2008) 		list_splice_tail_init(&ra_list, &done_list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2009) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2010) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2011) 	if (!list_empty(&done_list))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2012) 		list_splice_init(&done_list, &trans->r_itemq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2013) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2014) 	return error;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2015) }
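
/*
 * The XLOG_RECOVER_COMMIT_QUEUE_MAX batching above keeps readahead
 * usefully ahead of replay: each pass 2 item has its ->ra_pass2
 * readahead issued and is parked on ra_list, and only once 100 items
 * are queued does xlog_recover_items_pass2() start committing them, by
 * which time much of that readahead I/O has likely completed.
 */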
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2016) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2017) STATIC void
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2018) xlog_recover_add_item(
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2019) 	struct list_head	*head)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2020) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2021) 	struct xlog_recover_item *item;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2022) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2023) 	item = kmem_zalloc(sizeof(struct xlog_recover_item), 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2024) 	INIT_LIST_HEAD(&item->ri_list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2025) 	list_add_tail(&item->ri_list, head);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2026) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2027) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2028) STATIC int
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2029) xlog_recover_add_to_cont_trans(
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2030) 	struct xlog		*log,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2031) 	struct xlog_recover	*trans,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2032) 	char			*dp,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2033) 	int			len)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2034) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2035) 	struct xlog_recover_item *item;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2036) 	char			*ptr, *old_ptr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2037) 	int			old_len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2038) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2039) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2040) 	 * If the transaction is empty, the header was split across this and the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2041) 	 * previous record. Copy the rest of the header.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2042) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2043) 	if (list_empty(&trans->r_itemq)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2044) 		ASSERT(len <= sizeof(struct xfs_trans_header));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2045) 		if (len > sizeof(struct xfs_trans_header)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2046) 			xfs_warn(log->l_mp, "%s: bad header length", __func__);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2047) 			return -EFSCORRUPTED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2048) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2049) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2050) 		xlog_recover_add_item(&trans->r_itemq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2051) 		ptr = (char *)&trans->r_theader +
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2052) 				sizeof(struct xfs_trans_header) - len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2053) 		memcpy(ptr, dp, len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2054) 		return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2055) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2056) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2057) 	/* take the tail entry */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2058) 	item = list_entry(trans->r_itemq.prev, struct xlog_recover_item,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2059) 			  ri_list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2060) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2061) 	old_ptr = item->ri_buf[item->ri_cnt-1].i_addr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2062) 	old_len = item->ri_buf[item->ri_cnt-1].i_len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2063) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2064) 	ptr = krealloc(old_ptr, len + old_len, GFP_KERNEL | __GFP_NOFAIL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2065) 	memcpy(&ptr[old_len], dp, len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2066) 	item->ri_buf[item->ri_cnt-1].i_len += len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2067) 	item->ri_buf[item->ri_cnt-1].i_addr = ptr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2068) 	trace_xfs_log_recover_item_add_cont(log, trans, item, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2069) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2070) }
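
/*
 * A worked example with hypothetical sizes: a 128 byte region split
 * 96/32 across two op records leaves the tail item with i_len == 96
 * after the first record; the continuation above then grows the
 * allocation in place and appends the remaining bytes:
 */
#if 0	/* illustrative only -- not built */
	old_len = 96;				/* from the first record */
	ptr = krealloc(old_ptr, 32 + old_len, GFP_KERNEL | __GFP_NOFAIL);
	memcpy(&ptr[old_len], dp, 32);		/* bytes 96..127 */
#endif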
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2071) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2072) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2073)  * The next region to add is the start of a new region.  It could be
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2074)  * a whole region or it could be the first part of a new region.  Because
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2075)  * of this, the assumption here is that the type and size fields of all
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2076)  * format structures fit into the first 32 bits of the structure.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2077)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2078)  * This works because all regions must be 32 bit aligned.  Therefore, we
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2079)  * either have both fields or we have neither field.  If we have neither
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2080)  * field, the data part of the region is zero length.  We only have
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2081)  * a log_op_header and can throw away the header since a new one will appear
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2082)  * later.  If we have at least 4 bytes, then we can determine how many regions
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2083)  * will appear in the current log item.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2084)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2085) STATIC int
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2086) xlog_recover_add_to_trans(
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2087) 	struct xlog		*log,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2088) 	struct xlog_recover	*trans,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2089) 	char			*dp,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2090) 	int			len)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2091) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2092) 	struct xfs_inode_log_format	*in_f;			/* any will do */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2093) 	struct xlog_recover_item *item;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2094) 	char			*ptr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2095) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2096) 	if (!len)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2097) 		return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2098) 	if (list_empty(&trans->r_itemq)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2099) 		/* we need to catch log corruptions here */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2100) 		if (*(uint *)dp != XFS_TRANS_HEADER_MAGIC) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2101) 			xfs_warn(log->l_mp, "%s: bad header magic number",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2102) 				__func__);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2103) 			ASSERT(0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2104) 			return -EFSCORRUPTED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2105) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2106) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2107) 		if (len > sizeof(struct xfs_trans_header)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2108) 			xfs_warn(log->l_mp, "%s: bad header length", __func__);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2109) 			ASSERT(0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2110) 			return -EFSCORRUPTED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2111) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2112) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2113) 		/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2114) 		 * The transaction header can be arbitrarily split across op
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2115) 		 * records. If we don't have the whole thing here, copy what we
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2116) 		 * do have and handle the rest in the next record.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2117) 		 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2118) 		if (len == sizeof(struct xfs_trans_header))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2119) 			xlog_recover_add_item(&trans->r_itemq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2120) 		memcpy(&trans->r_theader, dp, len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2121) 		return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2122) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2123) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2124) 	ptr = kmem_alloc(len, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2125) 	memcpy(ptr, dp, len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2126) 	in_f = (struct xfs_inode_log_format *)ptr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2127) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2128) 	/* take the tail entry */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2129) 	item = list_entry(trans->r_itemq.prev, struct xlog_recover_item,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2130) 			  ri_list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2131) 	if (item->ri_total != 0 &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2132) 	     item->ri_total == item->ri_cnt) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2133) 		/* tail item is in use, get a new one */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2134) 		xlog_recover_add_item(&trans->r_itemq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2135) 		item = list_entry(trans->r_itemq.prev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2136) 					struct xlog_recover_item, ri_list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2137) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2138) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2139) 	if (item->ri_total == 0) {		/* first region to be added */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2140) 		if (in_f->ilf_size == 0 ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2141) 		    in_f->ilf_size > XLOG_MAX_REGIONS_IN_ITEM) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2142) 			xfs_warn(log->l_mp,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2143) 		"bad number of regions (%d) in inode log format",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2144) 				  in_f->ilf_size);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2145) 			ASSERT(0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2146) 			kmem_free(ptr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2147) 			return -EFSCORRUPTED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2148) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2149) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2150) 		item->ri_total = in_f->ilf_size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2151) 		item->ri_buf =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2152) 			kmem_zalloc(item->ri_total * sizeof(xfs_log_iovec_t),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2153) 				    0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2154) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2155) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2156) 	if (item->ri_total <= item->ri_cnt) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2157) 		xfs_warn(log->l_mp,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2158) 	"log item region count (%d) overflowed size (%d)",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2159) 				item->ri_cnt, item->ri_total);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2160) 		ASSERT(0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2161) 		kmem_free(ptr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2162) 		return -EFSCORRUPTED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2163) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2164) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2165) 	/* Description region is ri_buf[0] */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2166) 	item->ri_buf[item->ri_cnt].i_addr = ptr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2167) 	item->ri_buf[item->ri_cnt].i_len  = len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2168) 	item->ri_cnt++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2169) 	trace_xfs_log_recover_item_add(log, trans, item, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2170) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2171) }
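
/*
 * The xfs_inode_log_format cast above relies only on the convention
 * that every log format structure begins with a 16-bit type and a
 * 16-bit region count, so ilf_size is meaningful for any item type.
 * A hypothetical header showing the layout being assumed:
 */
#if 0	/* illustrative only -- not built */
	struct any_log_format {
		uint16_t	type;	/* XFS_LI_* item type */
		uint16_t	size;	/* number of regions in the item */
		/* item specific fields follow */
	};
#endif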
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2172) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2173) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2174)  * Free up any resources allocated by the transaction
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2175)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2176)  * Remember that EFIs, EFDs, and IUNLINKs are handled later.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2177)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2178) STATIC void
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2179) xlog_recover_free_trans(
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2180) 	struct xlog_recover	*trans)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2181) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2182) 	struct xlog_recover_item *item, *n;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2183) 	int			i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2184) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2185) 	hlist_del_init(&trans->r_list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2186) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2187) 	list_for_each_entry_safe(item, n, &trans->r_itemq, ri_list) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2188) 		/* Free the regions in the item. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2189) 		list_del(&item->ri_list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2190) 		for (i = 0; i < item->ri_cnt; i++)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2191) 			kmem_free(item->ri_buf[i].i_addr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2192) 		/* Free the item itself */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2193) 		kmem_free(item->ri_buf);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2194) 		kmem_free(item);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2195) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2196) 	/* Free the transaction recover structure */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2197) 	kmem_free(trans);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2198) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2199) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2200) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2201)  * On error or completion, trans is freed.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2202)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2203) STATIC int
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2204) xlog_recovery_process_trans(
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2205) 	struct xlog		*log,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2206) 	struct xlog_recover	*trans,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2207) 	char			*dp,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2208) 	unsigned int		len,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2209) 	unsigned int		flags,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2210) 	int			pass,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2211) 	struct list_head	*buffer_list)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2212) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2213) 	int			error = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2214) 	bool			freeit = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2215) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2216) 	/* mask off ophdr transaction container flags */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2217) 	flags &= ~XLOG_END_TRANS;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2218) 	if (flags & XLOG_WAS_CONT_TRANS)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2219) 		flags &= ~XLOG_CONTINUE_TRANS;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2220) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2221) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2222) 	 * Callees must not free the trans structure. We'll decide if we need to
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2223) 	 * free it or not based on the operation being done and its result.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2224) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2225) 	switch (flags) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2226) 	/* expected flag values */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2227) 	case 0:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2228) 	case XLOG_CONTINUE_TRANS:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2229) 		error = xlog_recover_add_to_trans(log, trans, dp, len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2230) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2231) 	case XLOG_WAS_CONT_TRANS:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2232) 		error = xlog_recover_add_to_cont_trans(log, trans, dp, len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2233) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2234) 	case XLOG_COMMIT_TRANS:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2235) 		error = xlog_recover_commit_trans(log, trans, pass,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2236) 						  buffer_list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2237) 		/* success or fail, we are now done with this transaction. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2238) 		freeit = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2239) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2240) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2241) 	/* unexpected flag values */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2242) 	case XLOG_UNMOUNT_TRANS:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2243) 		/* just skip trans */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2244) 		xfs_warn(log->l_mp, "%s: Unmount LR", __func__);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2245) 		freeit = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2246) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2247) 	case XLOG_START_TRANS:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2248) 	default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2249) 		xfs_warn(log->l_mp, "%s: bad flag 0x%x", __func__, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2250) 		ASSERT(0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2251) 		error = -EFSCORRUPTED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2252) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2253) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2254) 	if (error || freeit)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2255) 		xlog_recover_free_trans(trans);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2256) 	return error;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2257) }
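
/*
 * Dispatch summary for the switch above (flags are post-masking):
 *
 *	0 / XLOG_CONTINUE_TRANS	add the region to the transaction
 *	XLOG_WAS_CONT_TRANS	add to a continued (split) region
 *	XLOG_COMMIT_TRANS	replay the transaction, then free it
 *	XLOG_UNMOUNT_TRANS	warn and discard the transaction
 *	anything else		corruption: fail with -EFSCORRUPTED
 *	(incl. XLOG_START_TRANS)
 *
 * Per the comment above, the transaction is also freed on any error.
 */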
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2258) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2259) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2260)  * Lookup the transaction recovery structure associated with the ID in the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2261)  * current ophdr. If the transaction doesn't exist and the start flag is set in
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2262)  * the ophdr, then allocate a new transaction for future ID matches to find.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2263)  * Either way, return what we found during the lookup - an existing transaction
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2264)  * or nothing.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2265)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2266) STATIC struct xlog_recover *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2267) xlog_recover_ophdr_to_trans(
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2268) 	struct hlist_head	rhash[],
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2269) 	struct xlog_rec_header	*rhead,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2270) 	struct xlog_op_header	*ohead)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2271) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2272) 	struct xlog_recover	*trans;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2273) 	xlog_tid_t		tid;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2274) 	struct hlist_head	*rhp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2275) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2276) 	tid = be32_to_cpu(ohead->oh_tid);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2277) 	rhp = &rhash[XLOG_RHASH(tid)];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2278) 	hlist_for_each_entry(trans, rhp, r_list) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2279) 		if (trans->r_log_tid == tid)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2280) 			return trans;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2281) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2282) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2283) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2284) 	 * skip over non-start transaction headers - we could be
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2285) 	 * processing slack space before the next transaction starts
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2286) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2287) 	if (!(ohead->oh_flags & XLOG_START_TRANS))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2288) 		return NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2289) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2290) 	ASSERT(be32_to_cpu(ohead->oh_len) == 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2291) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2292) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2293) 	 * This is a new transaction so allocate a new recovery container to
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2294) 	 * hold the recovery ops that will follow.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2295) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2296) 	trans = kmem_zalloc(sizeof(struct xlog_recover), 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2297) 	trans->r_log_tid = tid;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2298) 	trans->r_lsn = be64_to_cpu(rhead->h_lsn);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2299) 	INIT_LIST_HEAD(&trans->r_itemq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2300) 	INIT_HLIST_NODE(&trans->r_list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2301) 	hlist_add_head(&trans->r_list, rhp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2302) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2303) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2304) 	 * Nothing more to do for this ophdr. Items to be added to this new
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2305) 	 * transaction will be in subsequent ophdr containers.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2306) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2307) 	return NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2308) }
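
/*
 * Note that rhash[] is a small chained hash keyed by XLOG_RHASH(tid).
 * Operations from different transactions can be interleaved in the log,
 * so each ophdr is routed to its transaction by tid rather than by its
 * position in the record stream.
 */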
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2309) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2310) STATIC int
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2311) xlog_recover_process_ophdr(
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2312) 	struct xlog		*log,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2313) 	struct hlist_head	rhash[],
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2314) 	struct xlog_rec_header	*rhead,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2315) 	struct xlog_op_header	*ohead,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2316) 	char			*dp,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2317) 	char			*end,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2318) 	int			pass,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2319) 	struct list_head	*buffer_list)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2320) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2321) 	struct xlog_recover	*trans;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2322) 	unsigned int		len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2323) 	int			error;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2324) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2325) 	/* Do we understand who wrote this op? */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2326) 	if (ohead->oh_clientid != XFS_TRANSACTION &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2327) 	    ohead->oh_clientid != XFS_LOG) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2328) 		xfs_warn(log->l_mp, "%s: bad clientid 0x%x",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2329) 			__func__, ohead->oh_clientid);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2330) 		ASSERT(0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2331) 		return -EFSCORRUPTED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2332) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2333) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2334) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2335) 	 * Check that the ophdr's payload does not overrun the end of the record.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2336) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2337) 	len = be32_to_cpu(ohead->oh_len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2338) 	if (dp + len > end) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2339) 		xfs_warn(log->l_mp, "%s: bad length 0x%x", __func__, len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2340) 		WARN_ON(1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2341) 		return -EFSCORRUPTED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2342) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2343) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2344) 	trans = xlog_recover_ophdr_to_trans(rhash, rhead, ohead);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2345) 	if (!trans) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2346) 		/* nothing to do, so skip over this ophdr */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2347) 		return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2348) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2349) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2350) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2351) 	 * The recovered buffer queue is drained only once we know that all
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2352) 	 * recovery items for the current LSN have been processed. This is
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2353) 	 * required because:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2354) 	 *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2355) 	 * - Buffer write submission updates the metadata LSN of the buffer.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2356) 	 * - Log recovery skips items with a metadata LSN >= the current LSN of
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2357) 	 *   the recovery item.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2358) 	 * - Separate recovery items against the same metadata buffer can share
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2359) 	 *   a current LSN. I.e., consider that the LSN of a recovery item is
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2360) 	 *   defined as the starting LSN of the first record in which its
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2361) 	 *   transaction appears, that a record can hold multiple transactions,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2362) 	 *   and/or that a transaction can span multiple records.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2363) 	 *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2364) 	 * In other words, we are allowed to submit a buffer from log recovery
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2365) 	 * once per current LSN. Otherwise, we may incorrectly skip recovery
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2366) 	 * items and cause corruption.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2367) 	 *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2368) 	 * We don't know up front whether buffers are updated multiple times per
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2369) 	 * LSN. Therefore, track the current LSN of each commit log record as it
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2370) 	 * is processed and drain the queue when it changes. Use commit records
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2371) 	 * because they are ordered correctly by the logging code.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2372) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2373) 	if (log->l_recovery_lsn != trans->r_lsn &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2374) 	    ohead->oh_flags & XLOG_COMMIT_TRANS) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2375) 		error = xfs_buf_delwri_submit(buffer_list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2376) 		if (error)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2377) 			return error;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2378) 		log->l_recovery_lsn = trans->r_lsn;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2379) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2380) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2381) 	return xlog_recovery_process_trans(log, trans, dp, len,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2382) 					   ohead->oh_flags, pass, buffer_list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2383) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2384) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2385) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2386)  * There are two valid states of the r_state field.  0 indicates that the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2387)  * transaction structure is in a normal state: either we have seen the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2388)  * start of the transaction, or the last operation we added was not a partial
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2389)  * operation.  If the last operation we added to the transaction was a
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2390)  * partial operation, we need to mark r_state with XLOG_WAS_CONT_TRANS.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2391)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2392)  * NOTE: skip LRs with 0 data length.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2393)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2394) STATIC int
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2395) xlog_recover_process_data(
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2396) 	struct xlog		*log,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2397) 	struct hlist_head	rhash[],
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2398) 	struct xlog_rec_header	*rhead,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2399) 	char			*dp,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2400) 	int			pass,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2401) 	struct list_head	*buffer_list)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2402) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2403) 	struct xlog_op_header	*ohead;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2404) 	char			*end;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2405) 	int			num_logops;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2406) 	int			error;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2407) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2408) 	end = dp + be32_to_cpu(rhead->h_len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2409) 	num_logops = be32_to_cpu(rhead->h_num_logops);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2410) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2411) 	/* check the log format matches our own - else we can't recover */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2412) 	if (xlog_header_check_recover(log->l_mp, rhead))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2413) 		return -EIO;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2414) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2415) 	trace_xfs_log_recover_record(log, rhead, pass);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2416) 	while ((dp < end) && num_logops) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2417) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2418) 		ohead = (struct xlog_op_header *)dp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2419) 		dp += sizeof(*ohead);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2420) 		ASSERT(dp <= end);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2421) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2422) 		/* errors will abort recovery */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2423) 		error = xlog_recover_process_ophdr(log, rhash, rhead, ohead,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2424) 						   dp, end, pass, buffer_list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2425) 		if (error)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2426) 			return error;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2427) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2428) 		dp += be32_to_cpu(ohead->oh_len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2429) 		num_logops--;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2430) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2431) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2432) }
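
/*
 * A sketch of the record data region walked above; dp starts just past
 * the record header and end = dp + h_len:
 *
 *	+--------+----------+--------+----------+-----+
 *	| ophdr0 | payload0 | ophdr1 | payload1 | ... |
 *	+--------+----------+--------+----------+-----+
 *
 * There are h_num_logops ophdrs in total and each payload is the oh_len
 * bytes that follow its ophdr.
 */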
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2433) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2434) /* Take all the collected deferred ops and finish them in order. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2435) static int
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2436) xlog_finish_defer_ops(
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2437) 	struct xfs_mount	*mp,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2438) 	struct list_head	*capture_list)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2439) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2440) 	struct xfs_defer_capture *dfc, *next;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2441) 	struct xfs_trans	*tp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2442) 	struct xfs_inode	*ip;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2443) 	int			error = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2444) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2445) 	list_for_each_entry_safe(dfc, next, capture_list, dfc_list) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2446) 		struct xfs_trans_res	resv;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2447) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2448) 		/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2449) 		 * Create a new transaction reservation from the captured
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2450) 		 * information.  Set logcount to 1 to force the new transaction
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2451) 		 * to regrant every roll so that we can make forward progress
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2452) 		 * in recovery no matter how full the log might be.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2453) 		 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2454) 		resv.tr_logres = dfc->dfc_logres;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2455) 		resv.tr_logcount = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2456) 		resv.tr_logflags = XFS_TRANS_PERM_LOG_RES;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2457) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2458) 		error = xfs_trans_alloc(mp, &resv, dfc->dfc_blkres,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2459) 				dfc->dfc_rtxres, XFS_TRANS_RESERVE, &tp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2460) 		if (error)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2461) 			return error;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2462) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2463) 		/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2464) 		 * Transfer to this new transaction all the dfops we captured
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2465) 		 * from recovering a single intent item.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2466) 		 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2467) 		list_del_init(&dfc->dfc_list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2468) 		xfs_defer_ops_continue(dfc, tp, &ip);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2469) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2470) 		error = xfs_trans_commit(tp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2471) 		if (ip) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2472) 			xfs_iunlock(ip, XFS_ILOCK_EXCL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2473) 			xfs_irele(ip);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2474) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2475) 		if (error)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2476) 			return error;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2477) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2478) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2479) 	ASSERT(list_empty(capture_list));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2480) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2481) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2482) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2483) /* Release all the captured defer ops and capture structures in this list. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2484) static void
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2485) xlog_abort_defer_ops(
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2486) 	struct xfs_mount		*mp,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2487) 	struct list_head		*capture_list)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2488) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2489) 	struct xfs_defer_capture	*dfc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2490) 	struct xfs_defer_capture	*next;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2491) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2492) 	list_for_each_entry_safe(dfc, next, capture_list, dfc_list) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2493) 		list_del_init(&dfc->dfc_list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2494) 		xfs_defer_ops_release(mp, dfc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2495) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2496) }

^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2497) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2498)  * When this is called, all of the log intent items which did not have
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2499)  * corresponding log done items should be in the AIL.  What we do now
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2500)  * is update the data structures associated with each one.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2501)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2502)  * Since we process the log intent items in normal transactions, they
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2503)  * will be removed at some point after the commit.  This prevents us
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2504)  * from just walking down the list processing each one.  We'll use a
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2505)  * flag in the intent item to skip those that we've already processed
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2506)  * and use the AIL iteration mechanism's generation count to try to
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2507)  * speed this up at least a bit.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2508)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2509)  * When we start, we know that the intents are the only things in the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2510)  * AIL.  As we process them, however, other items are added to the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2511)  * AIL.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2512)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2513) STATIC int
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2514) xlog_recover_process_intents(
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2515) 	struct xlog		*log)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2516) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2517) 	LIST_HEAD(capture_list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2518) 	struct xfs_ail_cursor	cur;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2519) 	struct xfs_log_item	*lip;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2520) 	struct xfs_ail		*ailp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2521) 	int			error = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2522) #if defined(DEBUG) || defined(XFS_WARN)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2523) 	xfs_lsn_t		last_lsn;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2524) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2525) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2526) 	ailp = log->l_ailp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2527) 	spin_lock(&ailp->ail_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2528) #if defined(DEBUG) || defined(XFS_WARN)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2529) 	last_lsn = xlog_assign_lsn(log->l_curr_cycle, log->l_curr_block);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2530) #endif
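	/*
	 * On debug builds, last_lsn records the head of the log as found by
	 * recovery; no intent recovered below should carry an LSN beyond it
	 * (see the ASSERT in the loop).
	 */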
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2531) 	for (lip = xfs_trans_ail_cursor_first(ailp, &cur, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2532) 	     lip != NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2533) 	     lip = xfs_trans_ail_cursor_next(ailp, &cur)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2534) 		/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2535) 		 * We're done when we see something other than an intent.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2536) 		 * There should be no intents left in the AIL now.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2537) 		 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2538) 		if (!xlog_item_is_intent(lip)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2539) #ifdef DEBUG
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2540) 			for (; lip; lip = xfs_trans_ail_cursor_next(ailp, &cur))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2541) 				ASSERT(!xlog_item_is_intent(lip));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2542) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2543) 			break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2544) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2545) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2546) 		/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2547) 		 * We should never see a redo item with a LSN higher than
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2548) 		 * the last transaction we found in the log at the start
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2549) 		 * of recovery.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2550) 		 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2551) 		ASSERT(XFS_LSN_CMP(last_lsn, lip->li_lsn) >= 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2552) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2553) 		/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2554) 		 * NOTE: If your intent processing routine can create more
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2555) 		 * deferred ops, you /must/ attach them to the capture list in
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2556) 		 * the recover routine or else those subsequent intents will be
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2557) 		 * replayed in the wrong order!
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2558) 		 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2559) 		spin_unlock(&ailp->ail_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2560) 		error = lip->li_ops->iop_recover(lip, &capture_list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2561) 		spin_lock(&ailp->ail_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2562) 		if (error)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2563) 			break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2564) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2565) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2566) 	xfs_trans_ail_cursor_done(&cur);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2567) 	spin_unlock(&ailp->ail_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2568) 	if (error)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2569) 		goto err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2570) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2571) 	error = xlog_finish_defer_ops(log->l_mp, &capture_list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2572) 	if (error)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2573) 		goto err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2574) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2575) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2576) err:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2577) 	xlog_abort_defer_ops(log->l_mp, &capture_list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2578) 	return error;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2579) }
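
/*
 * Note the two-phase shape of the function above: intents are replayed
 * under an AIL cursor with all follow-up work captured to capture_list,
 * and only after the AIL walk completes is that deferred work finished
 * (or aborted on error).  See the NOTE in the loop for why the capture
 * list ordering matters.
 */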
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2580) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2581) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2582)  * A cancel occurs when the mount has failed and we're bailing out.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2583)  * Release all pending log intent items so they don't pin the AIL.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2584)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2585) STATIC void
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2586) xlog_recover_cancel_intents(
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2587) 	struct xlog		*log)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2588) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2589) 	struct xfs_log_item	*lip;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2590) 	struct xfs_ail_cursor	cur;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2591) 	struct xfs_ail		*ailp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2592) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2593) 	ailp = log->l_ailp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2594) 	spin_lock(&ailp->ail_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2595) 	lip = xfs_trans_ail_cursor_first(ailp, &cur, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2596) 	while (lip != NULL) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2597) 		/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2598) 		 * We're done when we see something other than an intent.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2599) 		 * There should be no intents left in the AIL now.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2600) 		 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2601) 		if (!xlog_item_is_intent(lip)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2602) #ifdef DEBUG
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2603) 			for (; lip; lip = xfs_trans_ail_cursor_next(ailp, &cur))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2604) 				ASSERT(!xlog_item_is_intent(lip));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2605) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2606) 			break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2607) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2608) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2609) 		spin_unlock(&ailp->ail_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2610) 		lip->li_ops->iop_release(lip);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2611) 		spin_lock(&ailp->ail_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2612) 		lip = xfs_trans_ail_cursor_next(ailp, &cur);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2613) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2614) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2615) 	xfs_trans_ail_cursor_done(&cur);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2616) 	spin_unlock(&ailp->ail_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2617) }
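
/*
 * Unlike xlog_recover_process_intents() above, cancellation does not
 * replay anything: each intent is simply released via ->iop_release()
 * so nothing is left pinning the AIL when the mount is abandoned.
 */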
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2618) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2619) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2620)  * This routine performs a transaction to null out a bad inode pointer
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2621)  * in an agi unlinked inode hash bucket.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2622)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2623) STATIC void
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2624) xlog_recover_clear_agi_bucket(
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2625) 	xfs_mount_t	*mp,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2626) 	xfs_agnumber_t	agno,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2627) 	int		bucket)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2628) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2629) 	xfs_trans_t	*tp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2630) 	xfs_agi_t	*agi;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2631) 	xfs_buf_t	*agibp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2632) 	int		offset;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2633) 	int		error;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2634) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2635) 	error = xfs_trans_alloc(mp, &M_RES(mp)->tr_clearagi, 0, 0, 0, &tp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2636) 	if (error)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2637) 		goto out_error;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2638) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2639) 	error = xfs_read_agi(mp, tp, agno, &agibp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2640) 	if (error)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2641) 		goto out_abort;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2642) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2643) 	agi = agibp->b_addr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2644) 	agi->agi_unlinked[bucket] = cpu_to_be32(NULLAGINO);
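	/* Log only the one modified bucket slot, not the whole AGI. */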
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2645) 	offset = offsetof(xfs_agi_t, agi_unlinked) +
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2646) 		 (sizeof(xfs_agino_t) * bucket);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2647) 	xfs_trans_log_buf(tp, agibp, offset,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2648) 			  (offset + sizeof(xfs_agino_t) - 1));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2649) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2650) 	error = xfs_trans_commit(tp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2651) 	if (error)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2652) 		goto out_error;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2653) 	return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2654) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2655) out_abort:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2656) 	xfs_trans_cancel(tp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2657) out_error:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2658) 	xfs_warn(mp, "%s: failed to clear agi %d. Continuing.", __func__, agno);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2659) 	return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2660) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2661) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2662) STATIC xfs_agino_t
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2663) xlog_recover_process_one_iunlink(
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2664) 	struct xfs_mount		*mp,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2665) 	xfs_agnumber_t			agno,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2666) 	xfs_agino_t			agino,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2667) 	int				bucket)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2668) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2669) 	struct xfs_buf			*ibp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2670) 	struct xfs_dinode		*dip;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2671) 	struct xfs_inode		*ip;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2672) 	xfs_ino_t			ino;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2673) 	int				error;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2674) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2675) 	ino = XFS_AGINO_TO_INO(mp, agno, agino);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2676) 	error = xfs_iget(mp, NULL, ino, 0, 0, &ip);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2677) 	if (error)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2678) 		goto fail;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2679) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2680) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2681) 	 * Get the on disk inode to find the next inode in the bucket.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2682) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2683) 	error = xfs_imap_to_bp(mp, NULL, &ip->i_imap, &dip, &ibp, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2684) 	if (error)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2685) 		goto fail_iput;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2686) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2687) 	xfs_iflags_clear(ip, XFS_IRECOVERY);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2688) 	ASSERT(VFS_I(ip)->i_nlink == 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2689) 	ASSERT(VFS_I(ip)->i_mode != 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2690) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2691) 	/* setup for the next pass */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2692) 	agino = be32_to_cpu(dip->di_next_unlinked);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2693) 	xfs_buf_relse(ibp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2694) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2695) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2696) 	 * Prevent any DMAPI event from being sent when the reference on
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2697) 	 * the inode is dropped.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2698) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2699) 	ip->i_d.di_dmevmask = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2700) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2701) 	xfs_irele(ip);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2702) 	return agino;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2703) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2704)  fail_iput:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2705) 	xfs_irele(ip);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2706)  fail:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2707) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2708) 	 * We can't read in the inode this bucket points to, or this inode
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2709) 	 * is messed up.  Just ditch this bucket of inodes.  We will lose
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2710) 	 * some inodes and space, but at least we won't hang.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2711) 	 *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2712) 	 * Call xlog_recover_clear_agi_bucket() to perform a transaction to
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2713) 	 * clear the inode pointer in the bucket.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2714) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2715) 	xlog_recover_clear_agi_bucket(mp, agno, bucket);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2716) 	return NULLAGINO;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2717) }
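
/*
 * The caller walks one AGI unlinked bucket by repeatedly feeding the
 * returned agino back in; NULLAGINO terminates the walk, either because
 * the chain ended normally or because the bucket had to be ditched.
 */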
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2718) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2719) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2720)  * Recover AGI unlinked lists
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2721)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2722)  * This is called during recovery to process any inodes which we unlinked
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2723)  * but did not free when the system crashed.  These inodes will be on the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2724)  * lists in the AGI blocks.  What we do here is scan all the AGIs and fully
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2725)  * truncate and free any inodes found on the lists.  Each inode is removed
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2726)  * from the lists when it has been fully truncated and is freed.  The freeing
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2727)  * of the inode and its removal from the list must be atomic.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2728)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2729)  * If everything we touch in the agi processing loop is already in memory, this
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2730)  * loop can hold the CPU for a long time. It runs without lock contention,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2731)  * memory allocation contention, the need to wait for IO, etc., and so will run
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2732)  * until we either run out of inodes to process, run low on memory, or run out
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2733)  * of log space.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2734)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2735)  * This behaviour is bad for latency on single CPU and non-preemptible kernels,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2736)  * and can prevent other filesystem work (such as CIL pushes) from running. This
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2737)  * can lead to deadlocks if the recovery process runs out of log reservation
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2738)  * space. Hence we need to yield the CPU when there is other kernel work
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2739)  * scheduled on this CPU to ensure other scheduled work can run without undue
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2740)  * latency.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2741)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2742) STATIC void
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2743) xlog_recover_process_iunlinks(
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2744) 	struct xlog	*log)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2745) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2746) 	xfs_mount_t	*mp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2747) 	xfs_agnumber_t	agno;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2748) 	xfs_agi_t	*agi;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2749) 	xfs_buf_t	*agibp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2750) 	xfs_agino_t	agino;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2751) 	int		bucket;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2752) 	int		error;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2753) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2754) 	mp = log->l_mp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2755) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2756) 	for (agno = 0; agno < mp->m_sb.sb_agcount; agno++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2757) 		/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2758) 		 * Find the agi for this ag.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2759) 		 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2760) 		error = xfs_read_agi(mp, NULL, agno, &agibp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2761) 		if (error) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2762) 			/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2763) 			 * AGI is b0rked. Don't process it.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2764) 			 *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2765) 			 * We should probably mark the filesystem as corrupt
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2766) 			 * after we've recovered all the AGs we can....
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2767) 			 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2768) 			continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2769) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2770) 		/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2771) 		 * Unlock the buffer so that it can be acquired in the normal
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2772) 		 * course of the transaction to truncate and free each inode.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2773) 		 * Because we are not racing with anyone else here for the AGI
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2774) 		 * buffer, we don't even need to hold it locked to read the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2775) 		 * initial unlinked bucket entries out of the buffer. We keep the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2776) 		 * buffer reference, though, so that it stays pinned in memory
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2777) 		 * while we need the buffer.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2778) 		 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2779) 		agi = agibp->b_addr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2780) 		xfs_buf_unlock(agibp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2781) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2782) 		for (bucket = 0; bucket < XFS_AGI_UNLINKED_BUCKETS; bucket++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2783) 			agino = be32_to_cpu(agi->agi_unlinked[bucket]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2784) 			while (agino != NULLAGINO) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2785) 				agino = xlog_recover_process_one_iunlink(mp,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2786) 							agno, agino, bucket);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2787) 				cond_resched();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2788) 			}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2789) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2790) 		xfs_buf_rele(agibp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2791) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2792) }
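
/*
 * A sketch of the on-disk structure walked above: each AGI carries
 * XFS_AGI_UNLINKED_BUCKETS list heads, and each unlinked inode chains
 * to the next one in its bucket through di_next_unlinked:
 *
 *	agi_unlinked[bucket] -> inode A -> inode B -> ... -> NULLAGINO
 *
 * xlog_recover_process_one_iunlink() returns the next agino in the
 * chain, so the inner loop runs each bucket down to its end.
 */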
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2793) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2794) STATIC void
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2795) xlog_unpack_data(
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2796) 	struct xlog_rec_header	*rhead,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2797) 	char			*dp,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2798) 	struct xlog		*log)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2799) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2800) 	int			i, j, k;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2801) 
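	/*
	 * When the record was written, the log stamped its cycle number over
	 * the first __be32 of every basic block in the body (so torn writes
	 * can be detected) and saved the overwritten words in h_cycle_data
	 * (and, for v2 logs, in the extended headers).  Restore them here
	 * before the payload is parsed.
	 */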
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2802) 	for (i = 0; i < BTOBB(be32_to_cpu(rhead->h_len)) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2803) 		  i < (XLOG_HEADER_CYCLE_SIZE / BBSIZE); i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2804) 		*(__be32 *)dp = *(__be32 *)&rhead->h_cycle_data[i];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2805) 		dp += BBSIZE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2806) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2807) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2808) 	if (xfs_sb_version_haslogv2(&log->l_mp->m_sb)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2809) 		xlog_in_core_2_t *xhdr = (xlog_in_core_2_t *)rhead;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2810) 		for ( ; i < BTOBB(be32_to_cpu(rhead->h_len)); i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2811) 			j = i / (XLOG_HEADER_CYCLE_SIZE / BBSIZE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2812) 			k = i % (XLOG_HEADER_CYCLE_SIZE / BBSIZE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2813) 			*(__be32 *)dp = xhdr[j].hic_xheader.xh_cycle_data[k];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2814) 			dp += BBSIZE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2815) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2816) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2817) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2818) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2819) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2820)  * CRC check, unpack and process a log record.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2821)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2822) STATIC int
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2823) xlog_recover_process(
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2824) 	struct xlog		*log,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2825) 	struct hlist_head	rhash[],
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2826) 	struct xlog_rec_header	*rhead,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2827) 	char			*dp,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2828) 	int			pass,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2829) 	struct list_head	*buffer_list)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2830) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2831) 	__le32			old_crc = rhead->h_crc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2832) 	__le32			crc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2833) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2834) 	crc = xlog_cksum(log, rhead, dp, be32_to_cpu(rhead->h_len));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2835) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2836) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2837) 	 * Nothing else to do if this is a CRC verification pass. Just return
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2838) 	 * if this is a record with a non-zero CRC. Unfortunately, mkfs always
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2839) 	 * sets old_crc to 0 so we must consider this valid even on v5 supers.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2840) 	 * Otherwise, return EFSBADCRC on failure so the callers up the stack
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2841) 	 * know precisely what failed.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2842) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2843) 	if (pass == XLOG_RECOVER_CRCPASS) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2844) 		if (old_crc && crc != old_crc)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2845) 			return -EFSBADCRC;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2846) 		return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2847) 	}
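
	/*
	 * The CRC-only pass above neither unpacks nor replays the record; it
	 * is used while locating the head of the log to detect torn writes.
	 */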
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2848) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2849) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2850) 	 * We're in the normal recovery path. Issue a warning if and only if the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2851) 	 * CRC in the header is non-zero. This is an advisory warning and the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2852) 	 * zero CRC check prevents warnings from being emitted when upgrading
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2853) 	 * the kernel from one that does not add CRCs by default.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2854) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2855) 	if (crc != old_crc) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2856) 		if (old_crc || xfs_sb_version_hascrc(&log->l_mp->m_sb)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2857) 			xfs_alert(log->l_mp,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2858) 		"log record CRC mismatch: found 0x%x, expected 0x%x.",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2859) 					le32_to_cpu(old_crc),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2860) 					le32_to_cpu(crc));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2861) 			xfs_hex_dump(dp, 32);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2862) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2863) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2864) 		/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2865) 		 * If the filesystem is CRC enabled, this mismatch becomes a
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2866) 		 * fatal log corruption failure.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2867) 		 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2868) 		if (xfs_sb_version_hascrc(&log->l_mp->m_sb)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2869) 			XFS_ERROR_REPORT(__func__, XFS_ERRLEVEL_LOW, log->l_mp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2870) 			return -EFSCORRUPTED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2871) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2872) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2873) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2874) 	xlog_unpack_data(rhead, dp, log);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2875) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2876) 	return xlog_recover_process_data(log, rhash, rhead, dp, pass,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2877) 					 buffer_list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2878) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2879) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2880) STATIC int
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2881) xlog_valid_rec_header(
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2882) 	struct xlog		*log,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2883) 	struct xlog_rec_header	*rhead,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2884) 	xfs_daddr_t		blkno,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2885) 	int			bufsize)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2886) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2887) 	int			hlen;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2888) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2889) 	if (XFS_IS_CORRUPT(log->l_mp,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2890) 			   rhead->h_magicno != cpu_to_be32(XLOG_HEADER_MAGIC_NUM)))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2891) 		return -EFSCORRUPTED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2892) 	if (XFS_IS_CORRUPT(log->l_mp,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2893) 			   (!rhead->h_version ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2894) 			   (be32_to_cpu(rhead->h_version) &
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2895) 			    (~XLOG_VERSION_OKBITS))))) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2896) 		xfs_warn(log->l_mp, "%s: unrecognised log version (%d).",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2897) 			__func__, be32_to_cpu(rhead->h_version));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2898) 		return -EFSCORRUPTED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2899) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2900) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2901) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2902) 	 * LR body must have data (or it wouldn't have been written)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2903) 	 * and h_len must not be greater than LR buffer size.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2904) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2905) 	hlen = be32_to_cpu(rhead->h_len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2906) 	if (XFS_IS_CORRUPT(log->l_mp, hlen <= 0 || hlen > bufsize))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2907) 		return -EFSCORRUPTED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2908) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2909) 	if (XFS_IS_CORRUPT(log->l_mp,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2910) 			   blkno > log->l_logBBsize || blkno > INT_MAX))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2911) 		return -EFSCORRUPTED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2912) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2913) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2914) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2915) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2916)  * Read the log from tail to head and process the log records found.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2917)  * Handle the two cases where the tail and head are in the same cycle
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2918)  * and where the active portion of the log wraps around the end of
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2919)  * the physical log separately.  The pass parameter is passed through
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2920)  * to the routines called to process the data and is not looked at
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2921)  * here.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2922)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2923) STATIC int
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2924) xlog_do_recovery_pass(
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2925) 	struct xlog		*log,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2926) 	xfs_daddr_t		head_blk,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2927) 	xfs_daddr_t		tail_blk,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2928) 	int			pass,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2929) 	xfs_daddr_t		*first_bad)	/* out: first bad log rec */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2930) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2931) 	xlog_rec_header_t	*rhead;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2932) 	xfs_daddr_t		blk_no, rblk_no;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2933) 	xfs_daddr_t		rhead_blk;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2934) 	char			*offset;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2935) 	char			*hbp, *dbp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2936) 	int			error = 0, h_size, h_len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2937) 	int			error2 = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2938) 	int			bblks, split_bblks;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2939) 	int			hblks, split_hblks, wrapped_hblks;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2940) 	int			i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2941) 	struct hlist_head	rhash[XLOG_RHASH_SIZE];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2942) 	LIST_HEAD		(buffer_list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2943) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2944) 	ASSERT(head_blk != tail_blk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2945) 	blk_no = rhead_blk = tail_blk;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2946) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2947) 	for (i = 0; i < XLOG_RHASH_SIZE; i++)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2948) 		INIT_HLIST_HEAD(&rhash[i]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2949) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2950) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2951) 	 * Read the header of the tail block and get the iclog buffer size from
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2952) 	 * h_size.  Use this to tell how many sectors make up the log header.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2953) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2954) 	if (xfs_sb_version_haslogv2(&log->l_mp->m_sb)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2955) 		/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2956) 		 * When using variable length iclogs, read first sector of
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2957) 		 * iclog header and extract the header size from it.  Get a
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2958) 		 * new hbp that is the correct size.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2959) 		 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2960) 		hbp = xlog_alloc_buffer(log, 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2961) 		if (!hbp)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2962) 			return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2963) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2964) 		error = xlog_bread(log, tail_blk, 1, hbp, &offset);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2965) 		if (error)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2966) 			goto bread_err1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2967) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2968) 		rhead = (xlog_rec_header_t *)offset;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2969) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2970) 		/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2971) 		 * xfsprogs has a bug where record length is based on lsunit but
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2972) 		 * h_size (iclog size) is hardcoded to 32k. Now that we
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2973) 		 * unconditionally CRC verify the unmount record, this means the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2974) 		 * log buffer can be too small for the record and cause an
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2975) 		 * overrun.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2976) 		 *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2977) 		 * Detect this condition here. Use lsunit for the buffer size as
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2978) 		 * long as this looks like the mkfs case. Otherwise, return an
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2979) 		 * error to avoid a buffer overrun.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2980) 		 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2981) 		h_size = be32_to_cpu(rhead->h_size);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2982) 		h_len = be32_to_cpu(rhead->h_len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2983) 		if (h_len > h_size && h_len <= log->l_mp->m_logbsize &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2984) 		    rhead->h_num_logops == cpu_to_be32(1)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2985) 			xfs_warn(log->l_mp,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2986) 		"invalid iclog size (%d bytes), using lsunit (%d bytes)",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2987) 				 h_size, log->l_mp->m_logbsize);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2988) 			h_size = log->l_mp->m_logbsize;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2989) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2990) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2991) 		error = xlog_valid_rec_header(log, rhead, tail_blk, h_size);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2992) 		if (error)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2993) 			goto bread_err1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2994) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2995) 		hblks = xlog_logrec_hblks(log, rhead);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2996) 		if (hblks != 1) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2997) 			kmem_free(hbp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2998) 			hbp = xlog_alloc_buffer(log, hblks);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2999) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3000) 	} else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3001) 		ASSERT(log->l_sectBBsize == 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3002) 		hblks = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3003) 		hbp = xlog_alloc_buffer(log, 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3004) 		h_size = XLOG_BIG_RECORD_BSIZE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3005) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3006) 
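	/*
	 * At this point hblks and h_size describe one log record: hblks
	 * header blocks followed by up to h_size bytes of record body, so
	 * the data buffer below is allocated to hold a full record body.
	 */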
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3007) 	if (!hbp)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3008) 		return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3009) 	dbp = xlog_alloc_buffer(log, BTOBB(h_size));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3010) 	if (!dbp) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3011) 		kmem_free(hbp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3012) 		return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3013) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3014) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3015) 	memset(rhash, 0, sizeof(rhash));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3016) 	if (tail_blk > head_blk) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3017) 		/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3018) 		 * Perform recovery around the end of the physical log.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3019) 		 * When the head is not on the same cycle number as the tail,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3020) 		 * we can't do a sequential recovery.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3021) 		 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3022) 		while (blk_no < log->l_logBBsize) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3023) 			/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3024) 			 * Check for header wrapping around physical end-of-log
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3025) 			 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3026) 			offset = hbp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3027) 			split_hblks = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3028) 			wrapped_hblks = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3029) 			if (blk_no + hblks <= log->l_logBBsize) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3030) 				/* Read header in one read */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3031) 				error = xlog_bread(log, blk_no, hblks, hbp,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3032) 						   &offset);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3033) 				if (error)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3034) 					goto bread_err2;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3035) 			} else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3036) 				/* This LR is split across physical log end */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3037) 				if (blk_no != log->l_logBBsize) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3038) 					/* some data before physical log end */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3039) 					ASSERT(blk_no <= INT_MAX);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3040) 					split_hblks = log->l_logBBsize - (int)blk_no;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3041) 					ASSERT(split_hblks > 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3042) 					error = xlog_bread(log, blk_no,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3043) 							   split_hblks, hbp,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3044) 							   &offset);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3045) 					if (error)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3046) 						goto bread_err2;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3047) 				}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3048) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3049) 				/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3050) 				 * Note: this black magic still works with
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3051) 				 * large sector sizes (non-512) only because:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3052) 				 * - we increased the buffer size originally
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3053) 				 *   by 1 sector giving us enough extra space
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3054) 				 *   for the second read;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3055) 				 * - the log start is guaranteed to be sector
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3056) 				 *   aligned;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3057) 				 * - we read the log end (LR header start)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3058) 				 *   _first_, then the log start (LR header end)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3059) 				 *   - order is important.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3060) 				 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3061) 				wrapped_hblks = hblks - split_hblks;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3062) 				error = xlog_bread_noalign(log, 0,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3063) 						wrapped_hblks,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3064) 						offset + BBTOB(split_hblks));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3065) 				if (error)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3066) 					goto bread_err2;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3067) 			}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3068) 			rhead = (xlog_rec_header_t *)offset;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3069) 			error = xlog_valid_rec_header(log, rhead,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3070) 					split_hblks ? blk_no : 0, h_size);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3071) 			if (error)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3072) 				goto bread_err2;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3073) 
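^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3074) 			/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3075) 			 * h_len is the record body length in bytes; convert
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3076) 			 * it to 512-byte basic blocks for the data read, then
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3077) 			 * step blk_no past the header blocks just read.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3078) 			 */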
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3074) 			bblks = (int)BTOBB(be32_to_cpu(rhead->h_len));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3075) 			blk_no += hblks;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3076) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3077) 			/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3078) 			 * Read the log record data in multiple reads if it
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3079) 			 * wraps around the end of the log. Note that if the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3080) 			 * header already wrapped, blk_no could point past the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3081) 			 * end of the log. The record data is contiguous in
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3082) 			 * that case.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3083) 			 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3084) 			if (blk_no + bblks <= log->l_logBBsize ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3085) 			    blk_no >= log->l_logBBsize) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3086) 				rblk_no = xlog_wrap_logbno(log, blk_no);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3087) 				error = xlog_bread(log, rblk_no, bblks, dbp,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3088) 						   &offset);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3089) 				if (error)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3090) 					goto bread_err2;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3091) 			} else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3092) 				/* This log record is split across the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3093) 				 * physical end of log */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3094) 				offset = dbp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3095) 				split_bblks = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3096) 				if (blk_no != log->l_logBBsize) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3097) 					/* some data is before the physical
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3098) 					 * end of log */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3099) 					ASSERT(!wrapped_hblks);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3100) 					ASSERT(blk_no <= INT_MAX);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3101) 					split_bblks =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3102) 						log->l_logBBsize - (int)blk_no;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3103) 					ASSERT(split_bblks > 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3104) 					error = xlog_bread(log, blk_no,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3105) 							split_bblks, dbp,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3106) 							&offset);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3107) 					if (error)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3108) 						goto bread_err2;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3109) 				}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3110) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3111) 				/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3112) 				 * Note: this black magic still works with
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3113) 				 * large sector sizes (non-512) only because:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3114) 				 * - we increased the buffer size originally
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3115) 				 *   by 1 sector giving us enough extra space
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3116) 				 *   for the second read;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3117) 				 * - the log start is guaranteed to be sector
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3118) 				 *   aligned;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3119) 				 * - we read the log end (LR header start)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3120) 				 *   _first_, then the log start (LR header end)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3121) 				 *   - order is important.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3122) 				 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3123) 				error = xlog_bread_noalign(log, 0,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3124) 						bblks - split_bblks,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3125) 						offset + BBTOB(split_bblks));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3126) 				if (error)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3127) 					goto bread_err2;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3128) 			}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3129) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3130) 			error = xlog_recover_process(log, rhash, rhead, offset,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3131) 						     pass, &buffer_list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3132) 			if (error)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3133) 				goto bread_err2;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3134) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3135) 			blk_no += bblks;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3136) 			rhead_blk = blk_no;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3137) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3138) 
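^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3139) 		/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3140) 		 * The wrap loop above exits with blk_no at or past the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3141) 		 * physical end of the log; fold it back into the log's block
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3142) 		 * range before reading the records at the front of the log.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3143) 		 */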
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3139) 		ASSERT(blk_no >= log->l_logBBsize);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3140) 		blk_no -= log->l_logBBsize;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3141) 		rhead_blk = blk_no;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3142) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3143) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3144) 	/* read first part of physical log */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3145) 	while (blk_no < head_blk) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3146) 		error = xlog_bread(log, blk_no, hblks, hbp, &offset);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3147) 		if (error)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3148) 			goto bread_err2;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3149) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3150) 		rhead = (xlog_rec_header_t *)offset;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3151) 		error = xlog_valid_rec_header(log, rhead, blk_no, h_size);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3152) 		if (error)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3153) 			goto bread_err2;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3154) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3155) 		/* blocks in data section */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3156) 		bblks = (int)BTOBB(be32_to_cpu(rhead->h_len));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3157) 		error = xlog_bread(log, blk_no+hblks, bblks, dbp,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3158) 				   &offset);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3159) 		if (error)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3160) 			goto bread_err2;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3161) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3162) 		error = xlog_recover_process(log, rhash, rhead, offset, pass,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3163) 					     &buffer_list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3164) 		if (error)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3165) 			goto bread_err2;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3166) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3167) 		blk_no += bblks + hblks;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3168) 		rhead_blk = blk_no;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3169) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3170) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3171)  bread_err2:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3172) 	kmem_free(dbp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3173)  bread_err1:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3174) 	kmem_free(hbp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3175) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3176) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3177) 	 * Submit buffers that have been added from the last record processed,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3178) 	 * regardless of error status.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3179) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3180) 	if (!list_empty(&buffer_list))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3181) 		error2 = xfs_buf_delwri_submit(&buffer_list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3182) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3183) 	if (error && first_bad)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3184) 		*first_bad = rhead_blk;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3185) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3186) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3187) 	 * Transactions are freed at commit time but transactions without commit
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3188) 	 * records on disk are never committed. Free any that may be left in the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3189) 	 * hash table.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3190) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3191) 	for (i = 0; i < XLOG_RHASH_SIZE; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3192) 		struct hlist_node	*tmp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3193) 		struct xlog_recover	*trans;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3194) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3195) 		hlist_for_each_entry_safe(trans, tmp, &rhash[i], r_list)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3196) 			xlog_recover_free_trans(trans);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3197) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3198) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3199) 	return error ? error : error2;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3200) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3201) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3202) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3203)  * Do the recovery of the log.  We actually do this in two phases.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3204)  * The two passes are necessary in order to implement the function
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3205)  * of cancelling a record written into the log.  The first pass
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3206)  * determines those things which have been cancelled, and the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3207)  * second pass replays log items normally except for those which
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3208)  * have been cancelled.  The handling of the replay and cancellations
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3209)  * takes place in the log item type specific routines.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3210)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3211)  * The table of items which have cancel records in the log is allocated
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3212)  * and freed at this level, since only here do we know when all of
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3213)  * the log recovery has been completed.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3214)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3215) STATIC int
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3216) xlog_do_log_recovery(
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3217) 	struct xlog	*log,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3218) 	xfs_daddr_t	head_blk,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3219) 	xfs_daddr_t	tail_blk)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3220) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3221) 	int		error, i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3222) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3223) 	ASSERT(head_blk != tail_blk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3224) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3225) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3226) 	 * First do a pass to find all of the cancelled buf log items.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3227) 	 * Store them in the buf_cancel_table for use in the second pass.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3228) 	 */
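^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3229) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3230) 	 * The table is an array of XLOG_BC_TABLE_SIZE list heads.  Note
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3231) 	 * that kmem_zalloc() without KM_MAYFAIL retries internally until it
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3232) 	 * succeeds, which is why there is no NULL check below.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3233) 	 */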
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3229) 	log->l_buf_cancel_table = kmem_zalloc(XLOG_BC_TABLE_SIZE *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3230) 						 sizeof(struct list_head),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3231) 						 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3232) 	for (i = 0; i < XLOG_BC_TABLE_SIZE; i++)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3233) 		INIT_LIST_HEAD(&log->l_buf_cancel_table[i]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3234) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3235) 	error = xlog_do_recovery_pass(log, head_blk, tail_blk,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3236) 				      XLOG_RECOVER_PASS1, NULL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3237) 	if (error) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3238) 		kmem_free(log->l_buf_cancel_table);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3239) 		log->l_buf_cancel_table = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3240) 		return error;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3241) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3242) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3243) 	 * Then do a second pass to actually recover the items in the log.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3244) 	 * When it is complete free the table of buf cancel items.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3245) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3246) 	error = xlog_do_recovery_pass(log, head_blk, tail_blk,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3247) 				      XLOG_RECOVER_PASS2, NULL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3248) #ifdef DEBUG
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3249) 	if (!error) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3250) 		int	i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3251) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3252) 		for (i = 0; i < XLOG_BC_TABLE_SIZE; i++)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3253) 			ASSERT(list_empty(&log->l_buf_cancel_table[i]));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3254) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3255) #endif	/* DEBUG */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3256) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3257) 	kmem_free(log->l_buf_cancel_table);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3258) 	log->l_buf_cancel_table = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3259) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3260) 	return error;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3261) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3262) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3263) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3264)  * Do the actual recovery
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3265)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3266) STATIC int
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3267) xlog_do_recover(
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3268) 	struct xlog		*log,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3269) 	xfs_daddr_t		head_blk,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3270) 	xfs_daddr_t		tail_blk)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3271) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3272) 	struct xfs_mount	*mp = log->l_mp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3273) 	struct xfs_buf		*bp = mp->m_sb_bp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3274) 	struct xfs_sb		*sbp = &mp->m_sb;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3275) 	int			error;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3276) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3277) 	trace_xfs_log_recover(log, head_blk, tail_blk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3278) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3279) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3280) 	 * First replay the images in the log.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3281) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3282) 	error = xlog_do_log_recovery(log, head_blk, tail_blk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3283) 	if (error)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3284) 		return error;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3285) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3286) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3287) 	 * If IO errors happened during recovery, bail out.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3288) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3289) 	if (XFS_FORCED_SHUTDOWN(mp))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3290) 		return -EIO;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3291) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3292) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3293) 	 * We now update the tail_lsn since much of the recovery has completed
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3294) 	 * and there may be space available to use.  If there were no extent
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3295) 	 * frees or iunlinks, we can free up the entire log and set the tail_lsn to
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3296) 	 * be the last_sync_lsn.  This was set in xlog_find_tail to be the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3297) 	 * lsn of the last known good LR on disk.  If there are extent frees
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3298) 	 * or iunlinks they will have some entries in the AIL; so we look at
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3299) 	 * the AIL to determine how to set the tail_lsn.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3300) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3301) 	xlog_assign_tail_lsn(mp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3302) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3303) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3304) 	 * Now that we've finished replaying all buffer and inode updates,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3305) 	 * re-read the superblock and reverify it.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3306) 	 */
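^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3307) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3308) 	 * Lock the buffer and take an extra reference so the in-core
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3309) 	 * superblock buffer stays around for the synchronous read; both
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3310) 	 * paths below drop the lock and reference via xfs_buf_relse().
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3311) 	 */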
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3307) 	xfs_buf_lock(bp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3308) 	xfs_buf_hold(bp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3309) 	error = _xfs_buf_read(bp, XBF_READ);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3310) 	if (error) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3311) 		if (!XFS_FORCED_SHUTDOWN(mp)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3312) 			xfs_buf_ioerror_alert(bp, __this_address);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3313) 			ASSERT(0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3314) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3315) 		xfs_buf_relse(bp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3316) 		return error;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3317) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3318) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3319) 	/* Convert superblock from on-disk format */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3320) 	xfs_sb_from_disk(sbp, bp->b_addr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3321) 	xfs_buf_relse(bp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3322) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3323) 	/* re-initialise in-core superblock and geometry structures */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3324) 	xfs_reinit_percpu_counters(mp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3325) 	error = xfs_initialize_perag(mp, sbp->sb_agcount, &mp->m_maxagi);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3326) 	if (error) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3327) 		xfs_warn(mp, "Failed post-recovery per-ag init: %d", error);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3328) 		return error;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3329) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3330) 	mp->m_alloc_set_aside = xfs_alloc_set_aside(mp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3331) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3332) 	xlog_recover_check_summary(log);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3333) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3334) 	/* Normal transactions can now occur */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3335) 	log->l_flags &= ~XLOG_ACTIVE_RECOVERY;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3336) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3337) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3338) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3339) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3340)  * Perform recovery and re-initialize some log variables in xlog_find_tail.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3341)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3342)  * Return error or zero.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3343)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3344) int
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3345) xlog_recover(
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3346) 	struct xlog	*log)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3347) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3348) 	xfs_daddr_t	head_blk, tail_blk;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3349) 	int		error;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3350) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3351) 	/* find the tail of the log */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3352) 	error = xlog_find_tail(log, &head_blk, &tail_blk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3353) 	if (error)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3354) 		return error;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3355) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3356) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3357) 	 * The superblock was read before the log was available and thus the LSN
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3358) 	 * could not be verified. Check the superblock LSN against the current
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3359) 	 * LSN now that it's known.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3360) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3361) 	if (xfs_sb_version_hascrc(&log->l_mp->m_sb) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3362) 	    !xfs_log_check_lsn(log->l_mp, log->l_mp->m_sb.sb_lsn))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3363) 		return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3364) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3365) 	if (tail_blk != head_blk) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3366) 		/* There used to be a comment here:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3367) 		 *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3368) 		 * disallow recovery on read-only mounts.  note -- mount
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3369) 		 * checks for ENOSPC and turns it into an intelligent
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3370) 		 * error message.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3371) 		 * ...but this is no longer true.  Now, unless you specify
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3372) 		 * NORECOVERY (in which case this function would never be
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3373) 		 * called), we just go ahead and recover.  We do this all
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3374) 		 * under the vfs layer, so we can get away with it unless
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3375) 		 * the device itself is read-only, in which case we fail.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3376) 		 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3377) 		if ((error = xfs_dev_is_read_only(log->l_mp, "recovery"))) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3378) 			return error;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3379) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3380) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3381) 		/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3382) 		 * Version 5 superblock log feature mask validation. We know the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3383) 		 * log is dirty so check if there are any unknown log features
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3384) 		 * in what we need to recover. If there are unknown features
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3385) 		 * (e.g. unsupported transactions), then simply reject the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3386) 		 * attempt at recovery before touching anything.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3387) 		 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3388) 		if (XFS_SB_VERSION_NUM(&log->l_mp->m_sb) == XFS_SB_VERSION_5 &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3389) 		    xfs_sb_has_incompat_log_feature(&log->l_mp->m_sb,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3390) 					XFS_SB_FEAT_INCOMPAT_LOG_UNKNOWN)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3391) 			xfs_warn(log->l_mp,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3392) "Superblock has unknown incompatible log features (0x%x) enabled.",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3393) 				(log->l_mp->m_sb.sb_features_log_incompat &
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3394) 					XFS_SB_FEAT_INCOMPAT_LOG_UNKNOWN));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3395) 			xfs_warn(log->l_mp,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3396) "The log cannot be fully and/or safely recovered by this kernel.");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3397) 			xfs_warn(log->l_mp,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3398) "Please recover the log on a kernel that supports the unknown features.");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3399) 			return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3400) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3401) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3402) 		/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3403) 		 * Delay log recovery if the debug hook is set. This is debug
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3404) 		 * instrumentation to coordinate simulation of I/O failures with
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3405) 		 * log recovery.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3406) 		 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3407) 		if (xfs_globals.log_recovery_delay) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3408) 			xfs_notice(log->l_mp,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3409) 				"Delaying log recovery for %d seconds.",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3410) 				xfs_globals.log_recovery_delay);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3411) 			msleep(xfs_globals.log_recovery_delay * 1000);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3412) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3413) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3414) 		xfs_notice(log->l_mp, "Starting recovery (logdev: %s)",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3415) 				log->l_mp->m_logname ? log->l_mp->m_logname
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3416) 						     : "internal");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3417) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3418) 		error = xlog_do_recover(log, head_blk, tail_blk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3419) 		log->l_flags |= XLOG_RECOVERY_NEEDED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3420) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3421) 	return error;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3422) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3423) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3424) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3425)  * In the first part of recovery we replay inodes and buffers and build
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3426)  * up the list of extent free items which need to be processed.  Here
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3427)  * we process the extent free items and clean up the on disk unlinked
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3428)  * inode lists.  This is separated from the first part of recovery so
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3429)  * that the root and real-time bitmap inodes can be read in from disk in
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3430)  * between the two stages.  This is necessary so that we can free space
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3431)  * in the real-time portion of the file system.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3432)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3433) int
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3434) xlog_recover_finish(
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3435) 	struct xlog	*log)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3436) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3437) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3438) 	 * Now we're ready to do the transactions needed for the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3439) 	 * rest of recovery.  Start with completing all the extent
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3440) 	 * free intent records and then process the unlinked inode
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3441) 	 * lists.  At this point, we essentially run in normal mode
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3442) 	 * except that we're still performing recovery actions
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3443) 	 * rather than accepting new requests.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3444) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3445) 	if (log->l_flags & XLOG_RECOVERY_NEEDED) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3446) 		int	error;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3447) 		error = xlog_recover_process_intents(log);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3448) 		if (error) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3449) 			/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3450) 			 * Cancel all the unprocessed intent items now so that
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3451) 			 * we don't leave them pinned in the AIL.  This can
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3452) 			 * cause the AIL to livelock on the pinned item if
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3453) 			 * anyone tries to push the AIL (inode reclaim does
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3454) 			 * this) before we get around to xfs_log_mount_cancel.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3455) 			 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3456) 			xlog_recover_cancel_intents(log);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3457) 			xfs_alert(log->l_mp, "Failed to recover intents");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3458) 			return error;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3459) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3460) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3461) 		/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3462) 		 * Sync the log to get all the intents out of the AIL.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3463) 		 * This isn't absolutely necessary, but it helps in
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3464) 		 * case the unlink transactions would have problems
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3465) 		 * pushing the intents out of the way.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3466) 		 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3467) 		xfs_log_force(log->l_mp, XFS_LOG_SYNC);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3468) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3469) 		xlog_recover_process_iunlinks(log);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3470) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3471) 		xlog_recover_check_summary(log);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3472) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3473) 		xfs_notice(log->l_mp, "Ending recovery (logdev: %s)",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3474) 				log->l_mp->m_logname ? log->l_mp->m_logname
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3475) 						     : "internal");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3476) 		log->l_flags &= ~XLOG_RECOVERY_NEEDED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3477) 	} else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3478) 		xfs_info(log->l_mp, "Ending clean mount");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3479) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3480) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3481) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3482) 
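^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3483) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3484)  * Tear down an interrupted recovery: if the log still needed recovery work,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3485)  * cancel any intents that were recovered but never processed.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3486)  */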
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3483) void
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3484) xlog_recover_cancel(
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3485) 	struct xlog	*log)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3486) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3487) 	if (log->l_flags & XLOG_RECOVERY_NEEDED)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3488) 		xlog_recover_cancel_intents(log);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3489) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3490) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3491) #if defined(DEBUG)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3492) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3493)  * Read all of the agf and agi counters and check that they
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3494)  * are consistent with the superblock counters.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3495)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3496) STATIC void
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3497) xlog_recover_check_summary(
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3498) 	struct xlog	*log)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3499) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3500) 	struct xfs_mount *mp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3501) 	struct xfs_buf	*agfbp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3502) 	struct xfs_buf	*agibp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3503) 	xfs_agnumber_t	agno;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3504) 	uint64_t	freeblks;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3505) 	uint64_t	itotal;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3506) 	uint64_t	ifree;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3507) 	int		error;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3508) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3509) 	mp = log->l_mp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3510) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3511) 	freeblks = 0LL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3512) 	itotal = 0LL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3513) 	ifree = 0LL;
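^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3514) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3515) 	 * Walk every AG, totalling the on-disk free block and inode counts.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3516) 	 * Nothing compares these totals against the superblock counters any
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3517) 	 * more; the walk mainly checks that every AGF and AGI can still be
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3518) 	 * read (and hence passes its verifier) after recovery.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3519) 	 */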
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3514) 	for (agno = 0; agno < mp->m_sb.sb_agcount; agno++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3515) 		error = xfs_read_agf(mp, NULL, agno, 0, &agfbp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3516) 		if (error) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3517) 			xfs_alert(mp, "%s agf read failed agno %d error %d",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3518) 						__func__, agno, error);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3519) 		} else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3520) 			struct xfs_agf	*agfp = agfbp->b_addr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3521) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3522) 			freeblks += be32_to_cpu(agfp->agf_freeblks) +
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3523) 				    be32_to_cpu(agfp->agf_flcount);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3524) 			xfs_buf_relse(agfbp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3525) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3526) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3527) 		error = xfs_read_agi(mp, NULL, agno, &agibp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3528) 		if (error) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3529) 			xfs_alert(mp, "%s agi read failed agno %d error %d",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3530) 						__func__, agno, error);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3531) 		} else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3532) 			struct xfs_agi	*agi = agibp->b_addr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3533) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3534) 			itotal += be32_to_cpu(agi->agi_count);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3535) 			ifree += be32_to_cpu(agi->agi_freecount);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3536) 			xfs_buf_relse(agibp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3537) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3538) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3539) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3540) #endif /* DEBUG */