// SPDX-License-Identifier: GPL-2.0+
/*
 * Copyright (C) 2016 Oracle. All Rights Reserved.
 * Author: Darrick J. Wong <darrick.wong@oracle.com>
 */
#include "xfs.h"
#include "xfs_fs.h"
#include "xfs_format.h"
#include "xfs_log_format.h"
#include "xfs_trans_resv.h"
#include "xfs_bit.h"
#include "xfs_shared.h"
#include "xfs_mount.h"
#include "xfs_defer.h"
#include "xfs_trans.h"
#include "xfs_trans_priv.h"
#include "xfs_refcount_item.h"
#include "xfs_log.h"
#include "xfs_refcount.h"
#include "xfs_error.h"
#include "xfs_log_priv.h"
#include "xfs_log_recover.h"

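/* Slab zones for the refcount update intent (CUI) and done (CUD) log items. */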
kmem_zone_t *xfs_cui_zone;
kmem_zone_t *xfs_cud_zone;

static const struct xfs_item_ops xfs_cui_item_ops;

static inline struct xfs_cui_log_item *CUI_ITEM(struct xfs_log_item *lip)
{
        return container_of(lip, struct xfs_cui_log_item, cui_item);
}

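/*
 * Free a CUI item.  Items with more than XFS_CUI_MAX_FAST_EXTENTS extents
 * were allocated from the heap, so release those with kmem_free; everything
 * else goes back to the slab zone.
 */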
STATIC void
xfs_cui_item_free(
        struct xfs_cui_log_item *cuip)
{
        if (cuip->cui_format.cui_nextents > XFS_CUI_MAX_FAST_EXTENTS)
                kmem_free(cuip);
        else
                kmem_cache_free(xfs_cui_zone, cuip);
}

/*
 * Freeing the CUI requires that we remove it from the AIL if it has already
 * been placed there. However, the CUI may not yet have been placed in the AIL
 * when called by xfs_cui_release() from CUD processing due to the ordering of
 * committed vs unpin operations in bulk insert operations. Hence the reference
 * count to ensure only the last caller frees the CUI.
 */
STATIC void
xfs_cui_release(
        struct xfs_cui_log_item *cuip)
{
        ASSERT(atomic_read(&cuip->cui_refcount) > 0);
        if (atomic_dec_and_test(&cuip->cui_refcount)) {
                xfs_trans_ail_delete(&cuip->cui_item, SHUTDOWN_LOG_IO_ERROR);
                xfs_cui_item_free(cuip);
        }
}

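/*
 * Report the number of log iovecs and the amount of log space needed to log
 * the given CUI item.
 */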
STATIC void
xfs_cui_item_size(
        struct xfs_log_item *lip,
        int *nvecs,
        int *nbytes)
{
        struct xfs_cui_log_item *cuip = CUI_ITEM(lip);

        *nvecs += 1;
        *nbytes += xfs_cui_log_format_sizeof(cuip->cui_format.cui_nextents);
}

/*
 * This is called to fill in the vector of log iovecs for the
 * given cui log item. We use only 1 iovec, and we point that
 * at the cui_log_format structure embedded in the cui item.
 * It is at this point that we assert that all of the extent
 * slots in the cui item have been filled.
 */
STATIC void
xfs_cui_item_format(
        struct xfs_log_item *lip,
        struct xfs_log_vec *lv)
{
        struct xfs_cui_log_item *cuip = CUI_ITEM(lip);
        struct xfs_log_iovec *vecp = NULL;

        ASSERT(atomic_read(&cuip->cui_next_extent) ==
                        cuip->cui_format.cui_nextents);

        cuip->cui_format.cui_type = XFS_LI_CUI;
        cuip->cui_format.cui_size = 1;

        xlog_copy_iovec(lv, &vecp, XLOG_REG_TYPE_CUI_FORMAT, &cuip->cui_format,
                        xfs_cui_log_format_sizeof(cuip->cui_format.cui_nextents));
}

/*
 * The unpin operation is the last place a CUI is manipulated in the log. It is
 * either inserted in the AIL or aborted in the event of a log I/O error. In
 * either case, the CUI transaction has been successfully committed to make it
 * this far. Therefore, we expect whoever committed the CUI to either construct
 * and commit the CUD or drop the CUD's reference in the event of error. Simply
 * drop the log's CUI reference now that the log is done with it.
 */
STATIC void
xfs_cui_item_unpin(
        struct xfs_log_item *lip,
        int remove)
{
        struct xfs_cui_log_item *cuip = CUI_ITEM(lip);

        xfs_cui_release(cuip);
}

/*
 * The CUI has been either committed or aborted if the transaction has been
 * cancelled. If the transaction was cancelled, a CUD isn't going to be
 * constructed and thus we free the CUI here directly.
 */
STATIC void
xfs_cui_item_release(
        struct xfs_log_item *lip)
{
        xfs_cui_release(CUI_ITEM(lip));
}

/*
 * Allocate and initialize a cui item with the given number of extents.
 */
STATIC struct xfs_cui_log_item *
xfs_cui_init(
        struct xfs_mount *mp,
        uint nextents)
{
        struct xfs_cui_log_item *cuip;

        ASSERT(nextents > 0);
        if (nextents > XFS_CUI_MAX_FAST_EXTENTS)
                cuip = kmem_zalloc(xfs_cui_log_item_sizeof(nextents),
                                0);
        else
                cuip = kmem_cache_zalloc(xfs_cui_zone,
                                GFP_KERNEL | __GFP_NOFAIL);

        xfs_log_item_init(mp, &cuip->cui_item, XFS_LI_CUI, &xfs_cui_item_ops);
        cuip->cui_format.cui_nextents = nextents;
        cuip->cui_format.cui_id = (uintptr_t)(void *)cuip;
        atomic_set(&cuip->cui_next_extent, 0);
        atomic_set(&cuip->cui_refcount, 2);

        return cuip;
}

static inline struct xfs_cud_log_item *CUD_ITEM(struct xfs_log_item *lip)
{
        return container_of(lip, struct xfs_cud_log_item, cud_item);
}

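/*
 * Report the number of log iovecs and the amount of log space needed to log
 * the given CUD item.
 */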
STATIC void
xfs_cud_item_size(
        struct xfs_log_item *lip,
        int *nvecs,
        int *nbytes)
{
        *nvecs += 1;
        *nbytes += sizeof(struct xfs_cud_log_format);
}

/*
 * This is called to fill in the vector of log iovecs for the
 * given cud log item. We use only 1 iovec, and we point that
 * at the cud_log_format structure embedded in the cud item.
 */
STATIC void
xfs_cud_item_format(
        struct xfs_log_item *lip,
        struct xfs_log_vec *lv)
{
        struct xfs_cud_log_item *cudp = CUD_ITEM(lip);
        struct xfs_log_iovec *vecp = NULL;

        cudp->cud_format.cud_type = XFS_LI_CUD;
        cudp->cud_format.cud_size = 1;

        xlog_copy_iovec(lv, &vecp, XLOG_REG_TYPE_CUD_FORMAT, &cudp->cud_format,
                        sizeof(struct xfs_cud_log_format));
}

/*
 * The CUD is either committed or aborted if the transaction is cancelled. If
 * the transaction is cancelled, drop our reference to the CUI and free the
 * CUD.
 */
STATIC void
xfs_cud_item_release(
        struct xfs_log_item *lip)
{
        struct xfs_cud_log_item *cudp = CUD_ITEM(lip);

        xfs_cui_release(cudp->cud_cuip);
        kmem_cache_free(xfs_cud_zone, cudp);
}

static const struct xfs_item_ops xfs_cud_item_ops = {
        .flags          = XFS_ITEM_RELEASE_WHEN_COMMITTED,
        .iop_size       = xfs_cud_item_size,
        .iop_format     = xfs_cud_item_format,
        .iop_release    = xfs_cud_item_release,
};

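/*
 * Allocate a CUD, attach it to the given transaction, and point it at the
 * CUI whose work it will complete.
 */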
static struct xfs_cud_log_item *
xfs_trans_get_cud(
        struct xfs_trans *tp,
        struct xfs_cui_log_item *cuip)
{
        struct xfs_cud_log_item *cudp;

        cudp = kmem_cache_zalloc(xfs_cud_zone, GFP_KERNEL | __GFP_NOFAIL);
        xfs_log_item_init(tp->t_mountp, &cudp->cud_item, XFS_LI_CUD,
                        &xfs_cud_item_ops);
        cudp->cud_cuip = cuip;
        cudp->cud_format.cud_cui_id = cuip->cui_format.cui_id;

        xfs_trans_add_item(tp, &cudp->cud_item);
        return cudp;
}

/*
 * Finish a refcount update and log it to the CUD. Note that the transaction
 * is marked dirty regardless of whether the refcount update succeeds or
 * fails, in order to support the CUI/CUD lifecycle rules.
 */
static int
xfs_trans_log_finish_refcount_update(
        struct xfs_trans *tp,
        struct xfs_cud_log_item *cudp,
        enum xfs_refcount_intent_type type,
        xfs_fsblock_t startblock,
        xfs_extlen_t blockcount,
        xfs_fsblock_t *new_fsb,
        xfs_extlen_t *new_len,
        struct xfs_btree_cur **pcur)
{
        int error;

        error = xfs_refcount_finish_one(tp, type, startblock,
                        blockcount, new_fsb, new_len, pcur);

        /*
         * Mark the transaction dirty, even on error. This ensures the
         * transaction is aborted, which:
         *
         * 1.) releases the CUI and frees the CUD
         * 2.) shuts down the filesystem
         */
        tp->t_flags |= XFS_TRANS_DIRTY;
        set_bit(XFS_LI_DIRTY, &cudp->cud_item.li_flags);

        return error;
}

/* Sort refcount intents by AG. */
static int
xfs_refcount_update_diff_items(
        void *priv,
        struct list_head *a,
        struct list_head *b)
{
        struct xfs_mount *mp = priv;
        struct xfs_refcount_intent *ra;
        struct xfs_refcount_intent *rb;

        ra = container_of(a, struct xfs_refcount_intent, ri_list);
        rb = container_of(b, struct xfs_refcount_intent, ri_list);
        return XFS_FSB_TO_AGNO(mp, ra->ri_startblock) -
                XFS_FSB_TO_AGNO(mp, rb->ri_startblock);
}

/* Set the phys extent flags for this refcount update. */
static void
xfs_trans_set_refcount_flags(
        struct xfs_phys_extent *refc,
        enum xfs_refcount_intent_type type)
{
        refc->pe_flags = 0;
        switch (type) {
        case XFS_REFCOUNT_INCREASE:
        case XFS_REFCOUNT_DECREASE:
        case XFS_REFCOUNT_ALLOC_COW:
        case XFS_REFCOUNT_FREE_COW:
                refc->pe_flags |= type;
                break;
        default:
                ASSERT(0);
        }
}

/* Log refcount updates in the intent item. */
STATIC void
xfs_refcount_update_log_item(
        struct xfs_trans *tp,
        struct xfs_cui_log_item *cuip,
        struct xfs_refcount_intent *refc)
{
        uint next_extent;
        struct xfs_phys_extent *ext;

        tp->t_flags |= XFS_TRANS_DIRTY;
        set_bit(XFS_LI_DIRTY, &cuip->cui_item.li_flags);

        /*
         * atomic_inc_return gives us the value after the increment;
         * we want to use it as an array index so we need to subtract 1 from
         * it.
         */
        next_extent = atomic_inc_return(&cuip->cui_next_extent) - 1;
        ASSERT(next_extent < cuip->cui_format.cui_nextents);
        ext = &cuip->cui_format.cui_extents[next_extent];
        ext->pe_startblock = refc->ri_startblock;
        ext->pe_len = refc->ri_blockcount;
        xfs_trans_set_refcount_flags(ext, refc->ri_type);
}

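/*
 * Create the CUI for a batch of deferred refcount updates.  If requested,
 * sort the intents by AG before logging each one into the intent item.
 */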
static struct xfs_log_item *
xfs_refcount_update_create_intent(
        struct xfs_trans *tp,
        struct list_head *items,
        unsigned int count,
        bool sort)
{
        struct xfs_mount *mp = tp->t_mountp;
        struct xfs_cui_log_item *cuip = xfs_cui_init(mp, count);
        struct xfs_refcount_intent *refc;

        ASSERT(count > 0);

        xfs_trans_add_item(tp, &cuip->cui_item);
        if (sort)
                list_sort(mp, items, xfs_refcount_update_diff_items);
        list_for_each_entry(refc, items, ri_list)
                xfs_refcount_update_log_item(tp, cuip, refc);
        return &cuip->cui_item;
}

/* Get a CUD so we can process all the deferred refcount updates. */
static struct xfs_log_item *
xfs_refcount_update_create_done(
        struct xfs_trans *tp,
        struct xfs_log_item *intent,
        unsigned int count)
{
        return &xfs_trans_get_cud(tp, CUI_ITEM(intent))->cud_item;
}

/* Process a deferred refcount update. */
STATIC int
xfs_refcount_update_finish_item(
        struct xfs_trans *tp,
        struct xfs_log_item *done,
        struct list_head *item,
        struct xfs_btree_cur **state)
{
        struct xfs_refcount_intent *refc;
        xfs_fsblock_t new_fsb;
        xfs_extlen_t new_aglen;
        int error;

        refc = container_of(item, struct xfs_refcount_intent, ri_list);
        error = xfs_trans_log_finish_refcount_update(tp, CUD_ITEM(done),
                        refc->ri_type, refc->ri_startblock, refc->ri_blockcount,
                        &new_fsb, &new_aglen, state);

        /* Did we run out of reservation? Requeue what we didn't finish. */
        if (!error && new_aglen > 0) {
                ASSERT(refc->ri_type == XFS_REFCOUNT_INCREASE ||
                       refc->ri_type == XFS_REFCOUNT_DECREASE);
                refc->ri_startblock = new_fsb;
                refc->ri_blockcount = new_aglen;
                return -EAGAIN;
        }
        kmem_free(refc);
        return error;
}

/* Abort all pending CUIs. */
STATIC void
xfs_refcount_update_abort_intent(
        struct xfs_log_item *intent)
{
        xfs_cui_release(CUI_ITEM(intent));
}

/* Cancel a deferred refcount update. */
STATIC void
xfs_refcount_update_cancel_item(
        struct list_head *item)
{
        struct xfs_refcount_intent *refc;

        refc = container_of(item, struct xfs_refcount_intent, ri_list);
        kmem_free(refc);
}

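/* Deferred operation type for refcount updates. */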
const struct xfs_defer_op_type xfs_refcount_update_defer_type = {
        .max_items      = XFS_CUI_MAX_FAST_EXTENTS,
        .create_intent  = xfs_refcount_update_create_intent,
        .abort_intent   = xfs_refcount_update_abort_intent,
        .create_done    = xfs_refcount_update_create_done,
        .finish_item    = xfs_refcount_update_finish_item,
        .finish_cleanup = xfs_refcount_finish_one_cleanup,
        .cancel_item    = xfs_refcount_update_cancel_item,
};

/*
 * Process a refcount update intent item that was recovered from the log.
 * We need to update the refcountbt.
 */
STATIC int
xfs_cui_item_recover(
        struct xfs_log_item *lip,
        struct list_head *capture_list)
{
        struct xfs_bmbt_irec irec;
        struct xfs_cui_log_item *cuip = CUI_ITEM(lip);
        struct xfs_phys_extent *refc;
        struct xfs_cud_log_item *cudp;
        struct xfs_trans *tp;
        struct xfs_btree_cur *rcur = NULL;
        struct xfs_mount *mp = lip->li_mountp;
        xfs_fsblock_t startblock_fsb;
        xfs_fsblock_t new_fsb;
        xfs_extlen_t new_len;
        unsigned int refc_type;
        bool op_ok;
        bool requeue_only = false;
        enum xfs_refcount_intent_type type;
        int i;
        int error = 0;

        /*
         * First check the validity of the extents described by the
         * CUI. If any are bad, then assume that all are bad and
         * just toss the CUI.
         */
        for (i = 0; i < cuip->cui_format.cui_nextents; i++) {
                refc = &cuip->cui_format.cui_extents[i];
                startblock_fsb = XFS_BB_TO_FSB(mp,
                                XFS_FSB_TO_DADDR(mp, refc->pe_startblock));
                switch (refc->pe_flags & XFS_REFCOUNT_EXTENT_TYPE_MASK) {
                case XFS_REFCOUNT_INCREASE:
                case XFS_REFCOUNT_DECREASE:
                case XFS_REFCOUNT_ALLOC_COW:
                case XFS_REFCOUNT_FREE_COW:
                        op_ok = true;
                        break;
                default:
                        op_ok = false;
                        break;
                }
                if (!op_ok || startblock_fsb == 0 ||
                    refc->pe_len == 0 ||
                    startblock_fsb >= mp->m_sb.sb_dblocks ||
                    refc->pe_len >= mp->m_sb.sb_agblocks ||
                    (refc->pe_flags & ~XFS_REFCOUNT_EXTENT_FLAGS))
                        return -EFSCORRUPTED;
        }

        /*
         * Under normal operation, refcount updates are deferred, so we
         * wouldn't be adding them directly to a transaction. All
         * refcount updates manage reservation usage internally and
         * dynamically by deferring work that won't fit in the
         * transaction. Normally, any work that needs to be deferred
         * gets attached to the same defer_ops that scheduled the
         * refcount update. However, we're in log recovery here, so we
         * use the passed-in defer_ops to finish up any work that
         * doesn't fit. We need to reserve enough blocks to handle a
         * full btree split on either end of the refcount range.
         */
        error = xfs_trans_alloc(mp, &M_RES(mp)->tr_itruncate,
                        mp->m_refc_maxlevels * 2, 0, XFS_TRANS_RESERVE, &tp);
        if (error)
                return error;

        cudp = xfs_trans_get_cud(tp, cuip);

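        /*
         * Walk the extents in the CUI and finish each one.  Once an update
         * can only be partially completed, requeue the remaining work as new
         * deferred intents instead of finishing it here.
         */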
        for (i = 0; i < cuip->cui_format.cui_nextents; i++) {
                refc = &cuip->cui_format.cui_extents[i];
                refc_type = refc->pe_flags & XFS_REFCOUNT_EXTENT_TYPE_MASK;
                switch (refc_type) {
                case XFS_REFCOUNT_INCREASE:
                case XFS_REFCOUNT_DECREASE:
                case XFS_REFCOUNT_ALLOC_COW:
                case XFS_REFCOUNT_FREE_COW:
                        type = refc_type;
                        break;
                default:
                        XFS_ERROR_REPORT(__func__, XFS_ERRLEVEL_LOW, mp);
                        error = -EFSCORRUPTED;
                        goto abort_error;
                }
                if (requeue_only) {
                        new_fsb = refc->pe_startblock;
                        new_len = refc->pe_len;
                } else {
                        error = xfs_trans_log_finish_refcount_update(tp, cudp,
                                        type, refc->pe_startblock, refc->pe_len,
                                        &new_fsb, &new_len, &rcur);
                }
                if (error)
                        goto abort_error;

                /* Requeue what we didn't finish. */
                if (new_len > 0) {
                        irec.br_startblock = new_fsb;
                        irec.br_blockcount = new_len;
                        switch (type) {
                        case XFS_REFCOUNT_INCREASE:
                                xfs_refcount_increase_extent(tp, &irec);
                                break;
                        case XFS_REFCOUNT_DECREASE:
                                xfs_refcount_decrease_extent(tp, &irec);
                                break;
                        case XFS_REFCOUNT_ALLOC_COW:
                                xfs_refcount_alloc_cow_extent(tp,
                                                irec.br_startblock,
                                                irec.br_blockcount);
                                break;
                        case XFS_REFCOUNT_FREE_COW:
                                xfs_refcount_free_cow_extent(tp,
                                                irec.br_startblock,
                                                irec.br_blockcount);
                                break;
                        default:
                                ASSERT(0);
                        }
                        requeue_only = true;
                }
        }

        xfs_refcount_finish_one_cleanup(tp, rcur, error);
        return xfs_defer_ops_capture_and_commit(tp, NULL, capture_list);

abort_error:
        xfs_refcount_finish_one_cleanup(tp, rcur, error);
        xfs_trans_cancel(tp);
        return error;
}

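/*
 * Match CUIs in the AIL by intent id.  Log recovery uses this to find the
 * CUI that a recovered CUD refers to.
 */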
STATIC bool
xfs_cui_item_match(
        struct xfs_log_item *lip,
        uint64_t intent_id)
{
        return CUI_ITEM(lip)->cui_format.cui_id == intent_id;
}

/* Relog an intent item to push the log tail forward. */
static struct xfs_log_item *
xfs_cui_item_relog(
        struct xfs_log_item *intent,
        struct xfs_trans *tp)
{
        struct xfs_cud_log_item *cudp;
        struct xfs_cui_log_item *cuip;
        struct xfs_phys_extent *extp;
        unsigned int count;

        count = CUI_ITEM(intent)->cui_format.cui_nextents;
        extp = CUI_ITEM(intent)->cui_format.cui_extents;

        tp->t_flags |= XFS_TRANS_DIRTY;
        cudp = xfs_trans_get_cud(tp, CUI_ITEM(intent));
        set_bit(XFS_LI_DIRTY, &cudp->cud_item.li_flags);

        cuip = xfs_cui_init(tp->t_mountp, count);
        memcpy(cuip->cui_format.cui_extents, extp, count * sizeof(*extp));
        atomic_set(&cuip->cui_next_extent, count);
        xfs_trans_add_item(tp, &cuip->cui_item);
        set_bit(XFS_LI_DIRTY, &cuip->cui_item.li_flags);
        return &cuip->cui_item;
}

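/* Log item operations for the refcount update intent (CUI). */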
static const struct xfs_item_ops xfs_cui_item_ops = {
        .iop_size       = xfs_cui_item_size,
        .iop_format     = xfs_cui_item_format,
        .iop_unpin      = xfs_cui_item_unpin,
        .iop_release    = xfs_cui_item_release,
        .iop_recover    = xfs_cui_item_recover,
        .iop_match      = xfs_cui_item_match,
        .iop_relog      = xfs_cui_item_relog,
};

/*
 * Copy a CUI format buffer from the given buf into the destination
 * CUI format structure. The CUI/CUD items were designed not to need any
 * special alignment handling.
 */
static int
xfs_cui_copy_format(
        struct xfs_log_iovec *buf,
        struct xfs_cui_log_format *dst_cui_fmt)
{
        struct xfs_cui_log_format *src_cui_fmt;
        uint len;

        src_cui_fmt = buf->i_addr;
        len = xfs_cui_log_format_sizeof(src_cui_fmt->cui_nextents);

        if (buf->i_len == len) {
                memcpy(dst_cui_fmt, src_cui_fmt, len);
                return 0;
        }
        XFS_ERROR_REPORT(__func__, XFS_ERRLEVEL_LOW, NULL);
        return -EFSCORRUPTED;
}

/*
 * This routine is called to create an in-core extent refcount update
 * item from the cui format structure which was logged on disk.
 * It allocates an in-core cui, copies the extents from the format
 * structure into it, and adds the cui to the AIL with the given
 * LSN.
 */
STATIC int
xlog_recover_cui_commit_pass2(
        struct xlog *log,
        struct list_head *buffer_list,
        struct xlog_recover_item *item,
        xfs_lsn_t lsn)
{
        int error;
        struct xfs_mount *mp = log->l_mp;
        struct xfs_cui_log_item *cuip;
        struct xfs_cui_log_format *cui_formatp;

        cui_formatp = item->ri_buf[0].i_addr;

        cuip = xfs_cui_init(mp, cui_formatp->cui_nextents);
        error = xfs_cui_copy_format(&item->ri_buf[0], &cuip->cui_format);
        if (error) {
                xfs_cui_item_free(cuip);
                return error;
        }
        atomic_set(&cuip->cui_next_extent, cui_formatp->cui_nextents);
        /*
         * Insert the intent into the AIL directly and drop one reference so
         * that finishing or canceling the work will drop the other.
         */
        xfs_trans_ail_insert(log->l_ailp, &cuip->cui_item, lsn);
        xfs_cui_release(cuip);
        return 0;
}

const struct xlog_recover_item_ops xlog_cui_item_ops = {
        .item_type      = XFS_LI_CUI,
        .commit_pass2   = xlog_recover_cui_commit_pass2,
};

/*
 * This routine is called when a CUD format structure is found in a committed
 * transaction in the log. Its purpose is to cancel the corresponding CUI if it
 * was still in the log. To do this it searches the AIL for the CUI with an id
 * equal to that in the CUD format structure. If we find it we drop the CUD
 * reference, which removes the CUI from the AIL and frees it.
 */
STATIC int
xlog_recover_cud_commit_pass2(
        struct xlog *log,
        struct list_head *buffer_list,
        struct xlog_recover_item *item,
        xfs_lsn_t lsn)
{
        struct xfs_cud_log_format *cud_formatp;

        cud_formatp = item->ri_buf[0].i_addr;
        if (item->ri_buf[0].i_len != sizeof(struct xfs_cud_log_format)) {
                XFS_ERROR_REPORT(__func__, XFS_ERRLEVEL_LOW, log->l_mp);
                return -EFSCORRUPTED;
        }

        xlog_recover_release_intent(log, XFS_LI_CUI, cud_formatp->cud_cui_id);
        return 0;
}

const struct xlog_recover_item_ops xlog_cud_item_ops = {
        .item_type      = XFS_LI_CUD,
        .commit_pass2   = xlog_recover_cud_commit_pass2,
};