Orange Pi 5 kernel

Deprecated Linux kernel 5.10.110 for OrangePi 5/5B/5+ boards

// SPDX-License-Identifier: GPL-2.0+
/*
 * Copyright (C) 2016 Oracle.  All Rights Reserved.
 * Author: Darrick J. Wong <darrick.wong@oracle.com>
 */
#include "xfs.h"
#include "xfs_fs.h"
#include "xfs_format.h"
#include "xfs_log_format.h"
#include "xfs_trans_resv.h"
#include "xfs_bit.h"
#include "xfs_shared.h"
#include "xfs_mount.h"
#include "xfs_defer.h"
#include "xfs_trans.h"
#include "xfs_trans_priv.h"
#include "xfs_rmap_item.h"
#include "xfs_log.h"
#include "xfs_rmap.h"
#include "xfs_error.h"
#include "xfs_log_priv.h"
#include "xfs_log_recover.h"

kmem_zone_t	*xfs_rui_zone;
kmem_zone_t	*xfs_rud_zone;

static const struct xfs_item_ops xfs_rui_item_ops;

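/* Convert a generic log item pointer back to the RUI that embeds it. */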
static inline struct xfs_rui_log_item *RUI_ITEM(struct xfs_log_item *lip)
{
	return container_of(lip, struct xfs_rui_log_item, rui_item);
}

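/*
 * Free an RUI.  Oversized items (more than XFS_RUI_MAX_FAST_EXTENTS extents)
 * were heap allocated and go back via kmem_free(); everything else returns to
 * the xfs_rui_zone slab cache.
 */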
STATIC void
xfs_rui_item_free(
	struct xfs_rui_log_item	*ruip)
{
	if (ruip->rui_format.rui_nextents > XFS_RUI_MAX_FAST_EXTENTS)
		kmem_free(ruip);
	else
		kmem_cache_free(xfs_rui_zone, ruip);
}

/*
 * Freeing the RUI requires that we remove it from the AIL if it has already
 * been placed there. However, the RUI may not yet have been placed in the AIL
 * when called by xfs_rui_release() from RUD processing due to the ordering of
 * committed vs unpin operations in bulk insert operations. Hence the reference
 * count to ensure only the last caller frees the RUI.
 */
STATIC void
xfs_rui_release(
	struct xfs_rui_log_item	*ruip)
{
	ASSERT(atomic_read(&ruip->rui_refcount) > 0);
	if (atomic_dec_and_test(&ruip->rui_refcount)) {
		xfs_trans_ail_delete(&ruip->rui_item, SHUTDOWN_LOG_IO_ERROR);
		xfs_rui_item_free(ruip);
	}
}

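/*
 * Report the number of log iovecs and the amount of log space needed to log
 * the given RUI.  The format structure is variable length, scaling with the
 * number of extents recorded in it.
 */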
STATIC void
xfs_rui_item_size(
	struct xfs_log_item	*lip,
	int			*nvecs,
	int			*nbytes)
{
	struct xfs_rui_log_item	*ruip = RUI_ITEM(lip);

	*nvecs += 1;
	*nbytes += xfs_rui_log_format_sizeof(ruip->rui_format.rui_nextents);
}

/*
 * This is called to fill in the vector of log iovecs for the
 * given rui log item. We use only 1 iovec, and we point that
 * at the rui_log_format structure embedded in the rui item.
 * It is at this point that we assert that all of the extent
 * slots in the rui item have been filled.
 */
STATIC void
xfs_rui_item_format(
	struct xfs_log_item	*lip,
	struct xfs_log_vec	*lv)
{
	struct xfs_rui_log_item	*ruip = RUI_ITEM(lip);
	struct xfs_log_iovec	*vecp = NULL;

	ASSERT(atomic_read(&ruip->rui_next_extent) ==
			ruip->rui_format.rui_nextents);

	ruip->rui_format.rui_type = XFS_LI_RUI;
	ruip->rui_format.rui_size = 1;

	xlog_copy_iovec(lv, &vecp, XLOG_REG_TYPE_RUI_FORMAT, &ruip->rui_format,
			xfs_rui_log_format_sizeof(ruip->rui_format.rui_nextents));
}

/*
 * The unpin operation is the last place an RUI is manipulated in the log. It is
 * either inserted in the AIL or aborted in the event of a log I/O error. In
 * either case, the RUI transaction has been successfully committed to make it
 * this far. Therefore, we expect whoever committed the RUI to either construct
 * and commit the RUD or drop the RUD's reference in the event of error. Simply
 * drop the log's RUI reference now that the log is done with it.
 */
STATIC void
xfs_rui_item_unpin(
	struct xfs_log_item	*lip,
	int			remove)
{
	struct xfs_rui_log_item	*ruip = RUI_ITEM(lip);

	xfs_rui_release(ruip);
}

/*
 * The RUI has been either committed or aborted if the transaction has been
 * cancelled. If the transaction was cancelled, an RUD isn't going to be
 * constructed and thus we free the RUI here directly.
 */
STATIC void
xfs_rui_item_release(
	struct xfs_log_item	*lip)
{
	xfs_rui_release(RUI_ITEM(lip));
}

/*
 * Allocate and initialize an rui item with the given number of extents.
 */
STATIC struct xfs_rui_log_item *
xfs_rui_init(
	struct xfs_mount		*mp,
	uint				nextents)
{
	struct xfs_rui_log_item		*ruip;

	ASSERT(nextents > 0);
	if (nextents > XFS_RUI_MAX_FAST_EXTENTS)
		ruip = kmem_zalloc(xfs_rui_log_item_sizeof(nextents), 0);
	else
		ruip = kmem_cache_zalloc(xfs_rui_zone,
					 GFP_KERNEL | __GFP_NOFAIL);

	xfs_log_item_init(mp, &ruip->rui_item, XFS_LI_RUI, &xfs_rui_item_ops);
	ruip->rui_format.rui_nextents = nextents;
	ruip->rui_format.rui_id = (uintptr_t)(void *)ruip;
	atomic_set(&ruip->rui_next_extent, 0);
	atomic_set(&ruip->rui_refcount, 2);

	return ruip;
}

/*
 * Copy an RUI format buffer from the given buf, and into the destination
 * RUI format structure.  The RUI/RUD items were designed not to need any
 * special alignment handling.
 */
STATIC int
xfs_rui_copy_format(
	struct xfs_log_iovec		*buf,
	struct xfs_rui_log_format	*dst_rui_fmt)
{
	struct xfs_rui_log_format	*src_rui_fmt;
	uint				len;

	src_rui_fmt = buf->i_addr;
	len = xfs_rui_log_format_sizeof(src_rui_fmt->rui_nextents);

	if (buf->i_len != len) {
		XFS_ERROR_REPORT(__func__, XFS_ERRLEVEL_LOW, NULL);
		return -EFSCORRUPTED;
	}

	memcpy(dst_rui_fmt, src_rui_fmt, len);
	return 0;
}

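/* Convert a generic log item pointer back to the RUD that embeds it. */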
static inline struct xfs_rud_log_item *RUD_ITEM(struct xfs_log_item *lip)
{
	return container_of(lip, struct xfs_rud_log_item, rud_item);
}

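/*
 * Report the number of log iovecs and the amount of log space needed to log
 * the given RUD.  Unlike the RUI, the RUD format structure is a fixed size.
 */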
STATIC void
xfs_rud_item_size(
	struct xfs_log_item	*lip,
	int			*nvecs,
	int			*nbytes)
{
	*nvecs += 1;
	*nbytes += sizeof(struct xfs_rud_log_format);
}

/*
 * This is called to fill in the vector of log iovecs for the
 * given rud log item. We use only 1 iovec, and we point that
 * at the rud_log_format structure embedded in the rud item.
 * It is at this point that we assert that all of the extent
 * slots in the rud item have been filled.
 */
STATIC void
xfs_rud_item_format(
	struct xfs_log_item	*lip,
	struct xfs_log_vec	*lv)
{
	struct xfs_rud_log_item	*rudp = RUD_ITEM(lip);
	struct xfs_log_iovec	*vecp = NULL;

	rudp->rud_format.rud_type = XFS_LI_RUD;
	rudp->rud_format.rud_size = 1;

	xlog_copy_iovec(lv, &vecp, XLOG_REG_TYPE_RUD_FORMAT, &rudp->rud_format,
			sizeof(struct xfs_rud_log_format));
}

/*
 * The RUD is either committed or aborted if the transaction is cancelled. If
 * the transaction is cancelled, drop our reference to the RUI and free the
 * RUD.
 */
STATIC void
xfs_rud_item_release(
	struct xfs_log_item	*lip)
{
	struct xfs_rud_log_item	*rudp = RUD_ITEM(lip);

	xfs_rui_release(rudp->rud_ruip);
	kmem_cache_free(xfs_rud_zone, rudp);
}

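/*
 * RUDs are marked XFS_ITEM_RELEASE_WHEN_COMMITTED, so they are released as
 * soon as the transaction that logged them commits instead of being tracked
 * in the AIL.
 */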
static const struct xfs_item_ops xfs_rud_item_ops = {
	.flags		= XFS_ITEM_RELEASE_WHEN_COMMITTED,
	.iop_size	= xfs_rud_item_size,
	.iop_format	= xfs_rud_item_format,
	.iop_release	= xfs_rud_item_release,
};

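/*
 * Allocate an RUD, tie it to the given RUI via the intent id, and join it to
 * the transaction.
 */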
static struct xfs_rud_log_item *
xfs_trans_get_rud(
	struct xfs_trans		*tp,
	struct xfs_rui_log_item		*ruip)
{
	struct xfs_rud_log_item		*rudp;

	rudp = kmem_cache_zalloc(xfs_rud_zone, GFP_KERNEL | __GFP_NOFAIL);
	xfs_log_item_init(tp->t_mountp, &rudp->rud_item, XFS_LI_RUD,
			  &xfs_rud_item_ops);
	rudp->rud_ruip = ruip;
	rudp->rud_format.rud_rui_id = ruip->rui_format.rui_id;

	xfs_trans_add_item(tp, &rudp->rud_item);
	return rudp;
}

/* Set the map extent flags for this reverse mapping. */
static void
xfs_trans_set_rmap_flags(
	struct xfs_map_extent		*rmap,
	enum xfs_rmap_intent_type	type,
	int				whichfork,
	xfs_exntst_t			state)
{
	rmap->me_flags = 0;
	if (state == XFS_EXT_UNWRITTEN)
		rmap->me_flags |= XFS_RMAP_EXTENT_UNWRITTEN;
	if (whichfork == XFS_ATTR_FORK)
		rmap->me_flags |= XFS_RMAP_EXTENT_ATTR_FORK;
	switch (type) {
	case XFS_RMAP_MAP:
		rmap->me_flags |= XFS_RMAP_EXTENT_MAP;
		break;
	case XFS_RMAP_MAP_SHARED:
		rmap->me_flags |= XFS_RMAP_EXTENT_MAP_SHARED;
		break;
	case XFS_RMAP_UNMAP:
		rmap->me_flags |= XFS_RMAP_EXTENT_UNMAP;
		break;
	case XFS_RMAP_UNMAP_SHARED:
		rmap->me_flags |= XFS_RMAP_EXTENT_UNMAP_SHARED;
		break;
	case XFS_RMAP_CONVERT:
		rmap->me_flags |= XFS_RMAP_EXTENT_CONVERT;
		break;
	case XFS_RMAP_CONVERT_SHARED:
		rmap->me_flags |= XFS_RMAP_EXTENT_CONVERT_SHARED;
		break;
	case XFS_RMAP_ALLOC:
		rmap->me_flags |= XFS_RMAP_EXTENT_ALLOC;
		break;
	case XFS_RMAP_FREE:
		rmap->me_flags |= XFS_RMAP_EXTENT_FREE;
		break;
	default:
		ASSERT(0);
	}
}

/*
 * Finish an rmap update and log it to the RUD. Note that the transaction is
 * marked dirty regardless of whether the rmap update succeeds or fails to
 * support the RUI/RUD lifecycle rules.
 */
static int
xfs_trans_log_finish_rmap_update(
	struct xfs_trans		*tp,
	struct xfs_rud_log_item		*rudp,
	enum xfs_rmap_intent_type	type,
	uint64_t			owner,
	int				whichfork,
	xfs_fileoff_t			startoff,
	xfs_fsblock_t			startblock,
	xfs_filblks_t			blockcount,
	xfs_exntst_t			state,
	struct xfs_btree_cur		**pcur)
{
	int				error;

	error = xfs_rmap_finish_one(tp, type, owner, whichfork, startoff,
			startblock, blockcount, state, pcur);

	/*
	 * Mark the transaction dirty, even on error. This ensures the
	 * transaction is aborted, which:
	 *
	 * 1.) releases the RUI and frees the RUD
	 * 2.) shuts down the filesystem
	 */
	tp->t_flags |= XFS_TRANS_DIRTY;
	set_bit(XFS_LI_DIRTY, &rudp->rud_item.li_flags);

	return error;
}

/* Sort rmap intents by AG. */
static int
xfs_rmap_update_diff_items(
	void				*priv,
	struct list_head		*a,
	struct list_head		*b)
{
	struct xfs_mount		*mp = priv;
	struct xfs_rmap_intent		*ra;
	struct xfs_rmap_intent		*rb;

	ra = container_of(a, struct xfs_rmap_intent, ri_list);
	rb = container_of(b, struct xfs_rmap_intent, ri_list);
	return  XFS_FSB_TO_AGNO(mp, ra->ri_bmap.br_startblock) -
		XFS_FSB_TO_AGNO(mp, rb->ri_bmap.br_startblock);
}

/* Log rmap updates in the intent item. */
STATIC void
xfs_rmap_update_log_item(
	struct xfs_trans		*tp,
	struct xfs_rui_log_item		*ruip,
	struct xfs_rmap_intent		*rmap)
{
	uint				next_extent;
	struct xfs_map_extent		*map;

	tp->t_flags |= XFS_TRANS_DIRTY;
	set_bit(XFS_LI_DIRTY, &ruip->rui_item.li_flags);

	/*
	 * atomic_inc_return gives us the value after the increment;
	 * we want to use it as an array index so we need to subtract 1 from
	 * it.
	 */
	next_extent = atomic_inc_return(&ruip->rui_next_extent) - 1;
	ASSERT(next_extent < ruip->rui_format.rui_nextents);
	map = &ruip->rui_format.rui_extents[next_extent];
	map->me_owner = rmap->ri_owner;
	map->me_startblock = rmap->ri_bmap.br_startblock;
	map->me_startoff = rmap->ri_bmap.br_startoff;
	map->me_len = rmap->ri_bmap.br_blockcount;
	xfs_trans_set_rmap_flags(map, rmap->ri_type, rmap->ri_whichfork,
			rmap->ri_bmap.br_state);
}

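/*
 * Create an RUI covering the deferred rmap updates on @items, optionally
 * sorting the list by AG first, and log each update into the intent item.
 */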
static struct xfs_log_item *
xfs_rmap_update_create_intent(
	struct xfs_trans		*tp,
	struct list_head		*items,
	unsigned int			count,
	bool				sort)
{
	struct xfs_mount		*mp = tp->t_mountp;
	struct xfs_rui_log_item		*ruip = xfs_rui_init(mp, count);
	struct xfs_rmap_intent		*rmap;

	ASSERT(count > 0);

	xfs_trans_add_item(tp, &ruip->rui_item);
	if (sort)
		list_sort(mp, items, xfs_rmap_update_diff_items);
	list_for_each_entry(rmap, items, ri_list)
		xfs_rmap_update_log_item(tp, ruip, rmap);
	return &ruip->rui_item;
}

/* Get an RUD so we can process all the deferred rmap updates. */
static struct xfs_log_item *
xfs_rmap_update_create_done(
	struct xfs_trans		*tp,
	struct xfs_log_item		*intent,
	unsigned int			count)
{
	return &xfs_trans_get_rud(tp, RUI_ITEM(intent))->rud_item;
}

/* Process a deferred rmap update. */
STATIC int
xfs_rmap_update_finish_item(
	struct xfs_trans		*tp,
	struct xfs_log_item		*done,
	struct list_head		*item,
	struct xfs_btree_cur		**state)
{
	struct xfs_rmap_intent		*rmap;
	int				error;

	rmap = container_of(item, struct xfs_rmap_intent, ri_list);
	error = xfs_trans_log_finish_rmap_update(tp, RUD_ITEM(done),
			rmap->ri_type, rmap->ri_owner, rmap->ri_whichfork,
			rmap->ri_bmap.br_startoff, rmap->ri_bmap.br_startblock,
			rmap->ri_bmap.br_blockcount, rmap->ri_bmap.br_state,
			state);
	kmem_free(rmap);
	return error;
}

/* Abort all pending RUIs. */
STATIC void
xfs_rmap_update_abort_intent(
	struct xfs_log_item	*intent)
{
	xfs_rui_release(RUI_ITEM(intent));
}

/* Cancel a deferred rmap update. */
STATIC void
xfs_rmap_update_cancel_item(
	struct list_head		*item)
{
	struct xfs_rmap_intent		*rmap;

	rmap = container_of(item, struct xfs_rmap_intent, ri_list);
	kmem_free(rmap);
}

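/* Deferred operation type for rmap updates. */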
const struct xfs_defer_op_type xfs_rmap_update_defer_type = {
	.max_items	= XFS_RUI_MAX_FAST_EXTENTS,
	.create_intent	= xfs_rmap_update_create_intent,
	.abort_intent	= xfs_rmap_update_abort_intent,
	.create_done	= xfs_rmap_update_create_done,
	.finish_item	= xfs_rmap_update_finish_item,
	.finish_cleanup = xfs_rmap_finish_one_cleanup,
	.cancel_item	= xfs_rmap_update_cancel_item,
};

/*
 * Process an rmap update intent item that was recovered from the log.
 * We need to update the rmapbt.
 */
STATIC int
xfs_rui_item_recover(
	struct xfs_log_item		*lip,
	struct list_head		*capture_list)
{
	struct xfs_rui_log_item		*ruip = RUI_ITEM(lip);
	struct xfs_map_extent		*rmap;
	struct xfs_rud_log_item		*rudp;
	struct xfs_trans		*tp;
	struct xfs_btree_cur		*rcur = NULL;
	struct xfs_mount		*mp = lip->li_mountp;
	xfs_fsblock_t			startblock_fsb;
	enum xfs_rmap_intent_type	type;
	xfs_exntst_t			state;
	bool				op_ok;
	int				i;
	int				whichfork;
	int				error = 0;

	/*
	 * First check the validity of the extents described by the
	 * RUI.  If any are bad, then assume that all are bad and
	 * just toss the RUI.
	 */
	for (i = 0; i < ruip->rui_format.rui_nextents; i++) {
		rmap = &ruip->rui_format.rui_extents[i];
		startblock_fsb = XFS_BB_TO_FSB(mp,
				   XFS_FSB_TO_DADDR(mp, rmap->me_startblock));
		switch (rmap->me_flags & XFS_RMAP_EXTENT_TYPE_MASK) {
		case XFS_RMAP_EXTENT_MAP:
		case XFS_RMAP_EXTENT_MAP_SHARED:
		case XFS_RMAP_EXTENT_UNMAP:
		case XFS_RMAP_EXTENT_UNMAP_SHARED:
		case XFS_RMAP_EXTENT_CONVERT:
		case XFS_RMAP_EXTENT_CONVERT_SHARED:
		case XFS_RMAP_EXTENT_ALLOC:
		case XFS_RMAP_EXTENT_FREE:
			op_ok = true;
			break;
		default:
			op_ok = false;
			break;
		}
		if (!op_ok || startblock_fsb == 0 ||
		    rmap->me_len == 0 ||
		    startblock_fsb >= mp->m_sb.sb_dblocks ||
		    rmap->me_len >= mp->m_sb.sb_agblocks ||
		    (rmap->me_flags & ~XFS_RMAP_EXTENT_FLAGS))
			return -EFSCORRUPTED;
	}

	error = xfs_trans_alloc(mp, &M_RES(mp)->tr_itruncate,
			mp->m_rmap_maxlevels, 0, XFS_TRANS_RESERVE, &tp);
	if (error)
		return error;
	rudp = xfs_trans_get_rud(tp, ruip);

	for (i = 0; i < ruip->rui_format.rui_nextents; i++) {
		rmap = &ruip->rui_format.rui_extents[i];
		state = (rmap->me_flags & XFS_RMAP_EXTENT_UNWRITTEN) ?
				XFS_EXT_UNWRITTEN : XFS_EXT_NORM;
		whichfork = (rmap->me_flags & XFS_RMAP_EXTENT_ATTR_FORK) ?
				XFS_ATTR_FORK : XFS_DATA_FORK;
		switch (rmap->me_flags & XFS_RMAP_EXTENT_TYPE_MASK) {
		case XFS_RMAP_EXTENT_MAP:
			type = XFS_RMAP_MAP;
			break;
		case XFS_RMAP_EXTENT_MAP_SHARED:
			type = XFS_RMAP_MAP_SHARED;
			break;
		case XFS_RMAP_EXTENT_UNMAP:
			type = XFS_RMAP_UNMAP;
			break;
		case XFS_RMAP_EXTENT_UNMAP_SHARED:
			type = XFS_RMAP_UNMAP_SHARED;
			break;
		case XFS_RMAP_EXTENT_CONVERT:
			type = XFS_RMAP_CONVERT;
			break;
		case XFS_RMAP_EXTENT_CONVERT_SHARED:
			type = XFS_RMAP_CONVERT_SHARED;
			break;
		case XFS_RMAP_EXTENT_ALLOC:
			type = XFS_RMAP_ALLOC;
			break;
		case XFS_RMAP_EXTENT_FREE:
			type = XFS_RMAP_FREE;
			break;
		default:
			XFS_ERROR_REPORT(__func__, XFS_ERRLEVEL_LOW, NULL);
			error = -EFSCORRUPTED;
			goto abort_error;
		}
		error = xfs_trans_log_finish_rmap_update(tp, rudp, type,
				rmap->me_owner, whichfork,
				rmap->me_startoff, rmap->me_startblock,
				rmap->me_len, state, &rcur);
		if (error)
			goto abort_error;
	}

	xfs_rmap_finish_one_cleanup(tp, rcur, error);
	return xfs_defer_ops_capture_and_commit(tp, NULL, capture_list);

abort_error:
	xfs_rmap_finish_one_cleanup(tp, rcur, error);
	xfs_trans_cancel(tp);
	return error;
}

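/* Match RUIs by the intent id recorded in the log format structure. */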
STATIC bool
xfs_rui_item_match(
	struct xfs_log_item	*lip,
	uint64_t		intent_id)
{
	return RUI_ITEM(lip)->rui_format.rui_id == intent_id;
}

/* Relog an intent item to push the log tail forward. */
static struct xfs_log_item *
xfs_rui_item_relog(
	struct xfs_log_item		*intent,
	struct xfs_trans		*tp)
{
	struct xfs_rud_log_item		*rudp;
	struct xfs_rui_log_item		*ruip;
	struct xfs_map_extent		*extp;
	unsigned int			count;

	count = RUI_ITEM(intent)->rui_format.rui_nextents;
	extp = RUI_ITEM(intent)->rui_format.rui_extents;

	tp->t_flags |= XFS_TRANS_DIRTY;
	rudp = xfs_trans_get_rud(tp, RUI_ITEM(intent));
	set_bit(XFS_LI_DIRTY, &rudp->rud_item.li_flags);

	ruip = xfs_rui_init(tp->t_mountp, count);
	memcpy(ruip->rui_format.rui_extents, extp, count * sizeof(*extp));
	atomic_set(&ruip->rui_next_extent, count);
	xfs_trans_add_item(tp, &ruip->rui_item);
	set_bit(XFS_LI_DIRTY, &ruip->rui_item.li_flags);
	return &ruip->rui_item;
}

static const struct xfs_item_ops xfs_rui_item_ops = {
	.iop_size	= xfs_rui_item_size,
	.iop_format	= xfs_rui_item_format,
	.iop_unpin	= xfs_rui_item_unpin,
	.iop_release	= xfs_rui_item_release,
	.iop_recover	= xfs_rui_item_recover,
	.iop_match	= xfs_rui_item_match,
	.iop_relog	= xfs_rui_item_relog,
};

/*
 * This routine is called to create an in-core extent rmap update
 * item from the rui format structure which was logged on disk.
 * It allocates an in-core rui, copies the extents from the format
 * structure into it, and adds the rui to the AIL with the given
 * LSN.
 */
STATIC int
xlog_recover_rui_commit_pass2(
	struct xlog			*log,
	struct list_head		*buffer_list,
	struct xlog_recover_item	*item,
	xfs_lsn_t			lsn)
{
	int				error;
	struct xfs_mount		*mp = log->l_mp;
	struct xfs_rui_log_item		*ruip;
	struct xfs_rui_log_format	*rui_formatp;

	rui_formatp = item->ri_buf[0].i_addr;

	ruip = xfs_rui_init(mp, rui_formatp->rui_nextents);
	error = xfs_rui_copy_format(&item->ri_buf[0], &ruip->rui_format);
	if (error) {
		xfs_rui_item_free(ruip);
		return error;
	}
	atomic_set(&ruip->rui_next_extent, rui_formatp->rui_nextents);
	/*
	 * Insert the intent into the AIL directly and drop one reference so
	 * that finishing or canceling the work will drop the other.
	 */
	xfs_trans_ail_insert(log->l_ailp, &ruip->rui_item, lsn);
	xfs_rui_release(ruip);
	return 0;
}

const struct xlog_recover_item_ops xlog_rui_item_ops = {
	.item_type		= XFS_LI_RUI,
	.commit_pass2		= xlog_recover_rui_commit_pass2,
};

/*
 * This routine is called when an RUD format structure is found in a committed
 * transaction in the log. Its purpose is to cancel the corresponding RUI if it
 * was still in the log. To do this it searches the AIL for the RUI with an id
 * equal to that in the RUD format structure. If we find it we drop the RUD
 * reference, which removes the RUI from the AIL and frees it.
 */
STATIC int
xlog_recover_rud_commit_pass2(
	struct xlog			*log,
	struct list_head		*buffer_list,
	struct xlog_recover_item	*item,
	xfs_lsn_t			lsn)
{
	struct xfs_rud_log_format	*rud_formatp;

	rud_formatp = item->ri_buf[0].i_addr;
	ASSERT(item->ri_buf[0].i_len == sizeof(struct xfs_rud_log_format));

	xlog_recover_release_intent(log, XFS_LI_RUI, rud_formatp->rud_rui_id);
	return 0;
}

const struct xlog_recover_item_ops xlog_rud_item_ops = {
	.item_type		= XFS_LI_RUD,
	.commit_pass2		= xlog_recover_rud_commit_pass2,
};