^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1) // SPDX-License-Identifier: GPL-2.0
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3) * Copyright (c) 2000-2002,2005 Silicon Graphics, Inc.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4) * All Rights Reserved.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6) #include "xfs.h"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7) #include "xfs_fs.h"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8) #include "xfs_shared.h"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9) #include "xfs_format.h"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10) #include "xfs_log_format.h"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11) #include "xfs_trans_resv.h"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12) #include "xfs_mount.h"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13) #include "xfs_trans.h"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14) #include "xfs_buf_item.h"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15) #include "xfs_trans_priv.h"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 16) #include "xfs_trace.h"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 17)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 18) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 19) * Check to see if a buffer matching the given parameters is already
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 20) * a part of the given transaction.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 21) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 22) STATIC struct xfs_buf *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 23) xfs_trans_buf_item_match(
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 24) struct xfs_trans *tp,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 25) struct xfs_buftarg *target,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 26) struct xfs_buf_map *map,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 27) int nmaps)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 28) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 29) struct xfs_log_item *lip;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 30) struct xfs_buf_log_item *blip;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 31) int len = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 32) int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 33)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 34) for (i = 0; i < nmaps; i++)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 35) len += map[i].bm_len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 36)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 37) list_for_each_entry(lip, &tp->t_items, li_trans) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 38) blip = (struct xfs_buf_log_item *)lip;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 39) if (blip->bli_item.li_type == XFS_LI_BUF &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 40) blip->bli_buf->b_target == target &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 41) XFS_BUF_ADDR(blip->bli_buf) == map[0].bm_bn &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 42) blip->bli_buf->b_length == len) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 43) ASSERT(blip->bli_buf->b_map_count == nmaps);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 44) return blip->bli_buf;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 45) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 46) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 47)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 48) return NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 49) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 50)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 51) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 52) * Add the locked buffer to the transaction.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 53) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 54) * The buffer must be locked, and it cannot be associated with any
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 55) * transaction.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 56) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 57) * If the buffer does not yet have a buf log item associated with it,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 58) * then allocate one for it. Then add the buf item to the transaction.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 59) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 60) STATIC void
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 61) _xfs_trans_bjoin(
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 62) struct xfs_trans *tp,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 63) struct xfs_buf *bp,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 64) int reset_recur)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 65) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 66) struct xfs_buf_log_item *bip;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 67)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 68) ASSERT(bp->b_transp == NULL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 69)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 70) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 71) * The xfs_buf_log_item pointer is stored in b_log_item. If
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 72) * it doesn't have one yet, then allocate one and initialize it.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 73) * The checks to see if one is there are in xfs_buf_item_init().
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 74) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 75) xfs_buf_item_init(bp, tp->t_mountp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 76) bip = bp->b_log_item;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 77) ASSERT(!(bip->bli_flags & XFS_BLI_STALE));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 78) ASSERT(!(bip->__bli_format.blf_flags & XFS_BLF_CANCEL));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 79) ASSERT(!(bip->bli_flags & XFS_BLI_LOGGED));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 80) if (reset_recur)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 81) bip->bli_recur = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 82)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 83) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 84) * Take a reference for this transaction on the buf item.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 85) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 86) atomic_inc(&bip->bli_refcount);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 87)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 88) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 89) * Attach the item to the transaction so we can find it in
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 90) * xfs_trans_get_buf() and friends.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 91) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 92) xfs_trans_add_item(tp, &bip->bli_item);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 93) bp->b_transp = tp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 94)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 95) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 96)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 97) void
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 98) xfs_trans_bjoin(
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 99) struct xfs_trans *tp,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 100) struct xfs_buf *bp)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 101) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 102) _xfs_trans_bjoin(tp, bp, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 103) trace_xfs_trans_bjoin(bp->b_log_item);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 104) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 105)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 106) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 107) * Get and lock the buffer for the caller if it is not already
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 108) * locked within the given transaction. If it is already locked
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 109) * within the transaction, just increment its lock recursion count
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 110) * and return a pointer to it.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 111) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 112) * If the transaction pointer is NULL, make this just a normal
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 113) * get_buf() call.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 114) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 115) int
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 116) xfs_trans_get_buf_map(
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 117) struct xfs_trans *tp,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 118) struct xfs_buftarg *target,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 119) struct xfs_buf_map *map,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 120) int nmaps,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 121) xfs_buf_flags_t flags,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 122) struct xfs_buf **bpp)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 123) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 124) xfs_buf_t *bp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 125) struct xfs_buf_log_item *bip;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 126) int error;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 127)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 128) *bpp = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 129) if (!tp)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 130) return xfs_buf_get_map(target, map, nmaps, flags, bpp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 131)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 132) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 133) * If we find the buffer in the cache with this transaction
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 134) * pointer in its b_fsprivate2 field, then we know we already
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 135) * have it locked. In this case we just increment the lock
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 136) * recursion count and return the buffer to the caller.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 137) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 138) bp = xfs_trans_buf_item_match(tp, target, map, nmaps);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 139) if (bp != NULL) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 140) ASSERT(xfs_buf_islocked(bp));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 141) if (XFS_FORCED_SHUTDOWN(tp->t_mountp)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 142) xfs_buf_stale(bp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 143) bp->b_flags |= XBF_DONE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 144) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 145)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 146) ASSERT(bp->b_transp == tp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 147) bip = bp->b_log_item;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 148) ASSERT(bip != NULL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 149) ASSERT(atomic_read(&bip->bli_refcount) > 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 150) bip->bli_recur++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 151) trace_xfs_trans_get_buf_recur(bip);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 152) *bpp = bp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 153) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 154) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 155)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 156) error = xfs_buf_get_map(target, map, nmaps, flags, &bp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 157) if (error)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 158) return error;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 159)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 160) ASSERT(!bp->b_error);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 161)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 162) _xfs_trans_bjoin(tp, bp, 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 163) trace_xfs_trans_get_buf(bp->b_log_item);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 164) *bpp = bp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 165) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 166) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 167)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 168) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 169) * Get and lock the superblock buffer for the given transaction.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 170) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 171) struct xfs_buf *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 172) xfs_trans_getsb(
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 173) struct xfs_trans *tp)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 174) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 175) struct xfs_buf *bp = tp->t_mountp->m_sb_bp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 176)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 177) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 178) * Just increment the lock recursion count if the buffer is already
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 179) * attached to this transaction.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 180) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 181) if (bp->b_transp == tp) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 182) struct xfs_buf_log_item *bip = bp->b_log_item;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 183)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 184) ASSERT(bip != NULL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 185) ASSERT(atomic_read(&bip->bli_refcount) > 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 186) bip->bli_recur++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 187)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 188) trace_xfs_trans_getsb_recur(bip);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 189) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 190) xfs_buf_lock(bp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 191) xfs_buf_hold(bp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 192) _xfs_trans_bjoin(tp, bp, 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 193)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 194) trace_xfs_trans_getsb(bp->b_log_item);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 195) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 196)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 197) return bp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 198) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 199)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 200) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 201) * Get and lock the buffer for the caller if it is not already
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 202) * locked within the given transaction. If it has not yet been
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 203) * read in, read it from disk. If it is already locked
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 204) * within the transaction and already read in, just increment its
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 205) * lock recursion count and return a pointer to it.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 206) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 207) * If the transaction pointer is NULL, make this just a normal
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 208) * read_buf() call.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 209) */
int
xfs_trans_read_buf_map(
	struct xfs_mount	*mp,
	struct xfs_trans	*tp,
	struct xfs_buftarg	*target,
	struct xfs_buf_map	*map,
	int			nmaps,
	xfs_buf_flags_t		flags,
	struct xfs_buf		**bpp,
	const struct xfs_buf_ops *ops)
{
	struct xfs_buf		*bp = NULL;
	struct xfs_buf_log_item	*bip;
	int			error;

	*bpp = NULL;
	/*
	 * If we find the buffer in the cache with this transaction
	 * pointer in its b_fsprivate2 field, then we know we already
	 * have it locked.  If it is already read in we just increment
	 * the lock recursion count and return the buffer to the caller.
	 * If the buffer is not yet read in, then we read it in, increment
	 * the lock recursion count, and return it to the caller.
	 */
	if (tp)
		bp = xfs_trans_buf_item_match(tp, target, map, nmaps);
	if (bp) {
		/* A matched buffer is locked, joined, error-free and read in. */
		ASSERT(xfs_buf_islocked(bp));
		ASSERT(bp->b_transp == tp);
		ASSERT(bp->b_log_item != NULL);
		ASSERT(!bp->b_error);
		ASSERT(bp->b_flags & XBF_DONE);

		/*
		 * We never locked this buf ourselves, so we shouldn't
		 * brelse it either. Just get out.
		 */
		if (XFS_FORCED_SHUTDOWN(mp)) {
			trace_xfs_trans_read_buf_shut(bp, _RET_IP_);
			return -EIO;
		}

		/*
		 * Check if the caller is trying to read a buffer that is
		 * already attached to the transaction yet has no buffer ops
		 * assigned.  Ops are usually attached when the buffer is
		 * attached to the transaction, or by the read caller if
		 * special circumstances.  That didn't happen, which is not
		 * how this is supposed to go.
		 *
		 * If the buffer passes verification we'll let this go, but if
		 * not we have to shut down.  Let the transaction cleanup code
		 * release this buffer when it kills the transaction.
		 */
		ASSERT(bp->b_ops != NULL);
		error = xfs_buf_reverify(bp, ops);
		if (error) {
			xfs_buf_ioerror_alert(bp, __return_address);

			/*
			 * A dirty transaction can't tolerate metadata
			 * verification failure; force a shutdown.
			 */
			if (tp->t_flags & XFS_TRANS_DIRTY)
				xfs_force_shutdown(tp->t_mountp,
						SHUTDOWN_META_IO_ERROR);

			/* bad CRC means corrupted metadata */
			if (error == -EFSBADCRC)
				error = -EFSCORRUPTED;
			return error;
		}

		/* Recursive lookup within this transaction: bump the count. */
		bip = bp->b_log_item;
		bip->bli_recur++;

		ASSERT(atomic_read(&bip->bli_refcount) > 0);
		trace_xfs_trans_read_buf_recur(bip);
		ASSERT(bp->b_ops != NULL || ops == NULL);
		*bpp = bp;
		return 0;
	}

	/* Not in the transaction: read it (possibly from disk) now. */
	error = xfs_buf_read_map(target, map, nmaps, flags, &bp, ops,
			__return_address);
	switch (error) {
	case 0:
		break;
	default:
		/*
		 * Real I/O errors on a dirty transaction force a shutdown;
		 * transient errors (-ENOMEM/-EAGAIN) are just returned so
		 * the caller can retry.
		 */
		if (tp && (tp->t_flags & XFS_TRANS_DIRTY))
			xfs_force_shutdown(tp->t_mountp, SHUTDOWN_META_IO_ERROR);
		/* fall through */
	case -ENOMEM:
	case -EAGAIN:
		return error;
	}

	if (XFS_FORCED_SHUTDOWN(mp)) {
		xfs_buf_relse(bp);
		trace_xfs_trans_read_buf_shut(bp, _RET_IP_);
		return -EIO;
	}

	/* Join the freshly read buffer to the transaction, if there is one. */
	if (tp) {
		_xfs_trans_bjoin(tp, bp, 1);
		trace_xfs_trans_read_buf(bp->b_log_item);
	}
	ASSERT(bp->b_ops != NULL || ops == NULL);
	*bpp = bp;
	return 0;

}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 318)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 319) /* Has this buffer been dirtied by anyone? */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 320) bool
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 321) xfs_trans_buf_is_dirty(
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 322) struct xfs_buf *bp)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 323) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 324) struct xfs_buf_log_item *bip = bp->b_log_item;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 325)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 326) if (!bip)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 327) return false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 328) ASSERT(bip->bli_item.li_type == XFS_LI_BUF);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 329) return test_bit(XFS_LI_DIRTY, &bip->bli_item.li_flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 330) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 331)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 332) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 333) * Release a buffer previously joined to the transaction. If the buffer is
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 334) * modified within this transaction, decrement the recursion count but do not
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 335) * release the buffer even if the count goes to 0. If the buffer is not modified
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 336) * within the transaction, decrement the recursion count and release the buffer
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 337) * if the recursion count goes to 0.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 338) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 339) * If the buffer is to be released and it was not already dirty before this
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 340) * transaction began, then also free the buf_log_item associated with it.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 341) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 342) * If the transaction pointer is NULL, this is a normal xfs_buf_relse() call.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 343) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 344) void
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 345) xfs_trans_brelse(
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 346) struct xfs_trans *tp,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 347) struct xfs_buf *bp)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 348) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 349) struct xfs_buf_log_item *bip = bp->b_log_item;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 350)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 351) ASSERT(bp->b_transp == tp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 352)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 353) if (!tp) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 354) xfs_buf_relse(bp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 355) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 356) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 357)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 358) trace_xfs_trans_brelse(bip);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 359) ASSERT(bip->bli_item.li_type == XFS_LI_BUF);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 360) ASSERT(atomic_read(&bip->bli_refcount) > 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 361)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 362) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 363) * If the release is for a recursive lookup, then decrement the count
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 364) * and return.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 365) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 366) if (bip->bli_recur > 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 367) bip->bli_recur--;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 368) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 369) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 370)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 371) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 372) * If the buffer is invalidated or dirty in this transaction, we can't
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 373) * release it until we commit.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 374) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 375) if (test_bit(XFS_LI_DIRTY, &bip->bli_item.li_flags))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 376) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 377) if (bip->bli_flags & XFS_BLI_STALE)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 378) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 379)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 380) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 381) * Unlink the log item from the transaction and clear the hold flag, if
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 382) * set. We wouldn't want the next user of the buffer to get confused.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 383) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 384) ASSERT(!(bip->bli_flags & XFS_BLI_LOGGED));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 385) xfs_trans_del_item(&bip->bli_item);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 386) bip->bli_flags &= ~XFS_BLI_HOLD;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 387)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 388) /* drop the reference to the bli */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 389) xfs_buf_item_put(bip);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 390)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 391) bp->b_transp = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 392) xfs_buf_relse(bp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 393) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 394)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 395) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 396) * Mark the buffer as not needing to be unlocked when the buf item's
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 397) * iop_committing() routine is called. The buffer must already be locked
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 398) * and associated with the given transaction.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 399) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 400) /* ARGSUSED */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 401) void
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 402) xfs_trans_bhold(
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 403) xfs_trans_t *tp,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 404) xfs_buf_t *bp)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 405) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 406) struct xfs_buf_log_item *bip = bp->b_log_item;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 407)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 408) ASSERT(bp->b_transp == tp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 409) ASSERT(bip != NULL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 410) ASSERT(!(bip->bli_flags & XFS_BLI_STALE));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 411) ASSERT(!(bip->__bli_format.blf_flags & XFS_BLF_CANCEL));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 412) ASSERT(atomic_read(&bip->bli_refcount) > 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 413)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 414) bip->bli_flags |= XFS_BLI_HOLD;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 415) trace_xfs_trans_bhold(bip);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 416) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 417)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 418) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 419) * Cancel the previous buffer hold request made on this buffer
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 420) * for this transaction.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 421) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 422) void
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 423) xfs_trans_bhold_release(
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 424) xfs_trans_t *tp,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 425) xfs_buf_t *bp)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 426) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 427) struct xfs_buf_log_item *bip = bp->b_log_item;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 428)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 429) ASSERT(bp->b_transp == tp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 430) ASSERT(bip != NULL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 431) ASSERT(!(bip->bli_flags & XFS_BLI_STALE));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 432) ASSERT(!(bip->__bli_format.blf_flags & XFS_BLF_CANCEL));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 433) ASSERT(atomic_read(&bip->bli_refcount) > 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 434) ASSERT(bip->bli_flags & XFS_BLI_HOLD);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 435)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 436) bip->bli_flags &= ~XFS_BLI_HOLD;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 437) trace_xfs_trans_bhold_release(bip);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 438) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 439)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 440) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 441) * Mark a buffer dirty in the transaction.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 442) */
void
xfs_trans_dirty_buf(
	struct xfs_trans	*tp,
	struct xfs_buf		*bp)
{
	struct xfs_buf_log_item	*bip = bp->b_log_item;

	/* The buffer must already be joined to this transaction. */
	ASSERT(bp->b_transp == tp);
	ASSERT(bip != NULL);

	/*
	 * Mark the buffer as needing to be written out eventually,
	 * and set its iodone function to remove the buffer's buf log
	 * item from the AIL and free it when the buffer is flushed
	 * to disk.
	 */
	bp->b_flags |= XBF_DONE;

	ASSERT(atomic_read(&bip->bli_refcount) > 0);

	/*
	 * If we invalidated the buffer within this transaction, then
	 * cancel the invalidation now that we're dirtying the buffer
	 * again. There are no races with the code in xfs_buf_item_unpin(),
	 * because we have a reference to the buffer this entire time.
	 */
	if (bip->bli_flags & XFS_BLI_STALE) {
		bip->bli_flags &= ~XFS_BLI_STALE;
		ASSERT(bp->b_flags & XBF_STALE);
		bp->b_flags &= ~XBF_STALE;
		/* also clear the cancel flag that was logged with the stale */
		bip->__bli_format.blf_flags &= ~XFS_BLF_CANCEL;
	}
	/* Record that this transaction logged the buffer. */
	bip->bli_flags |= XFS_BLI_DIRTY | XFS_BLI_LOGGED;

	/* Dirty the transaction and the log item itself. */
	tp->t_flags |= XFS_TRANS_DIRTY;
	set_bit(XFS_LI_DIRTY, &bip->bli_item.li_flags);
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 480)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 481) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 482) * This is called to mark bytes first through last inclusive of the given
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 483) * buffer as needing to be logged when the transaction is committed.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 484) * The buffer must already be associated with the given transaction.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 485) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 486) * First and last are numbers relative to the beginning of this buffer,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 487) * so the first byte in the buffer is numbered 0 regardless of the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 488) * value of b_blkno.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 489) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 490) void
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 491) xfs_trans_log_buf(
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 492) struct xfs_trans *tp,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 493) struct xfs_buf *bp,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 494) uint first,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 495) uint last)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 496) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 497) struct xfs_buf_log_item *bip = bp->b_log_item;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 498)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 499) ASSERT(first <= last && last < BBTOB(bp->b_length));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 500) ASSERT(!(bip->bli_flags & XFS_BLI_ORDERED));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 501)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 502) xfs_trans_dirty_buf(tp, bp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 503)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 504) trace_xfs_trans_log_buf(bip);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 505) xfs_buf_item_log(bip, first, last);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 506) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 507)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 508)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 509) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 510) * Invalidate a buffer that is being used within a transaction.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 511) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 512) * Typically this is because the blocks in the buffer are being freed, so we
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 513) * need to prevent it from being written out when we're done. Allowing it
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 514) * to be written again might overwrite data in the free blocks if they are
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 515) * reallocated to a file.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 516) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 517) * We prevent the buffer from being written out by marking it stale. We can't
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 518) * get rid of the buf log item at this point because the buffer may still be
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 519) * pinned by another transaction. If that is the case, then we'll wait until
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 520) * the buffer is committed to disk for the last time (we can tell by the ref
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 521) * count) and free it in xfs_buf_item_unpin(). Until that happens we will
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 522) * keep the buffer locked so that the buffer and buf log item are not reused.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 523) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 524) * We also set the XFS_BLF_CANCEL flag in the buf log format structure and log
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 525) * the buf item. This will be used at recovery time to determine that copies
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 526) * of the buffer in the log before this should not be replayed.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 527) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 528) * We mark the item descriptor and the transaction dirty so that we'll hold
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 529) * the buffer until after the commit.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 530) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 531) * Since we're invalidating the buffer, we also clear the state about which
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 532) * parts of the buffer have been logged. We also clear the flag indicating
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 533) * that this is an inode buffer since the data in the buffer will no longer
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 534) * be valid.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 535) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 536) * We set the stale bit in the buffer as well since we're getting rid of it.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 537) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 538) void
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 539) xfs_trans_binval(
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 540) xfs_trans_t *tp,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 541) xfs_buf_t *bp)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 542) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 543) struct xfs_buf_log_item *bip = bp->b_log_item;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 544) int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 545)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 546) ASSERT(bp->b_transp == tp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 547) ASSERT(bip != NULL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 548) ASSERT(atomic_read(&bip->bli_refcount) > 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 549)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 550) trace_xfs_trans_binval(bip);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 551)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 552) if (bip->bli_flags & XFS_BLI_STALE) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 553) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 554) * If the buffer is already invalidated, then
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 555) * just return.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 556) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 557) ASSERT(bp->b_flags & XBF_STALE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 558) ASSERT(!(bip->bli_flags & (XFS_BLI_LOGGED | XFS_BLI_DIRTY)));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 559) ASSERT(!(bip->__bli_format.blf_flags & XFS_BLF_INODE_BUF));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 560) ASSERT(!(bip->__bli_format.blf_flags & XFS_BLFT_MASK));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 561) ASSERT(bip->__bli_format.blf_flags & XFS_BLF_CANCEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 562) ASSERT(test_bit(XFS_LI_DIRTY, &bip->bli_item.li_flags));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 563) ASSERT(tp->t_flags & XFS_TRANS_DIRTY);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 564) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 565) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 566)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 567) xfs_buf_stale(bp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 568)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 569) bip->bli_flags |= XFS_BLI_STALE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 570) bip->bli_flags &= ~(XFS_BLI_INODE_BUF | XFS_BLI_LOGGED | XFS_BLI_DIRTY);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 571) bip->__bli_format.blf_flags &= ~XFS_BLF_INODE_BUF;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 572) bip->__bli_format.blf_flags |= XFS_BLF_CANCEL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 573) bip->__bli_format.blf_flags &= ~XFS_BLFT_MASK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 574) for (i = 0; i < bip->bli_format_count; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 575) memset(bip->bli_formats[i].blf_data_map, 0,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 576) (bip->bli_formats[i].blf_map_size * sizeof(uint)));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 577) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 578) set_bit(XFS_LI_DIRTY, &bip->bli_item.li_flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 579) tp->t_flags |= XFS_TRANS_DIRTY;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 580) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 581)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 582) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 583) * This call is used to indicate that the buffer contains on-disk inodes which
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 584) * must be handled specially during recovery. They require special handling
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 585) * because only the di_next_unlinked from the inodes in the buffer should be
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 586) * recovered. The rest of the data in the buffer is logged via the inodes
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 587) * themselves.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 588) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 589) * All we do is set the XFS_BLI_INODE_BUF flag in the items flags so it can be
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 590) * transferred to the buffer's log format structure so that we'll know what to
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 591) * do at recovery time.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 592) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 593) void
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 594) xfs_trans_inode_buf(
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 595) xfs_trans_t *tp,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 596) xfs_buf_t *bp)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 597) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 598) struct xfs_buf_log_item *bip = bp->b_log_item;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 599)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 600) ASSERT(bp->b_transp == tp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 601) ASSERT(bip != NULL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 602) ASSERT(atomic_read(&bip->bli_refcount) > 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 603)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 604) bip->bli_flags |= XFS_BLI_INODE_BUF;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 605) bp->b_flags |= _XBF_INODES;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 606) xfs_trans_buf_set_type(tp, bp, XFS_BLFT_DINO_BUF);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 607) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 608)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 609) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 610) * This call is used to indicate that the buffer is going to
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 611) * be staled and was an inode buffer. This means it gets
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 612) * special processing during unpin - where any inodes
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 613) * associated with the buffer should be removed from ail.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 614) * There is also special processing during recovery,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 615) * any replay of the inodes in the buffer needs to be
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 616) * prevented as the buffer may have been reused.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 617) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 618) void
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 619) xfs_trans_stale_inode_buf(
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 620) xfs_trans_t *tp,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 621) xfs_buf_t *bp)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 622) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 623) struct xfs_buf_log_item *bip = bp->b_log_item;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 624)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 625) ASSERT(bp->b_transp == tp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 626) ASSERT(bip != NULL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 627) ASSERT(atomic_read(&bip->bli_refcount) > 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 628)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 629) bip->bli_flags |= XFS_BLI_STALE_INODE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 630) bp->b_flags |= _XBF_INODES;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 631) xfs_trans_buf_set_type(tp, bp, XFS_BLFT_DINO_BUF);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 632) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 633)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 634) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 635) * Mark the buffer as being one which contains newly allocated
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 636) * inodes. We need to make sure that even if this buffer is
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 637) * relogged as an 'inode buf' we still recover all of the inode
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 638) * images in the face of a crash. This works in coordination with
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 639) * xfs_buf_item_committed() to ensure that the buffer remains in the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 640) * AIL at its original location even after it has been relogged.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 641) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 642) /* ARGSUSED */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 643) void
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 644) xfs_trans_inode_alloc_buf(
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 645) xfs_trans_t *tp,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 646) xfs_buf_t *bp)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 647) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 648) struct xfs_buf_log_item *bip = bp->b_log_item;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 649)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 650) ASSERT(bp->b_transp == tp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 651) ASSERT(bip != NULL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 652) ASSERT(atomic_read(&bip->bli_refcount) > 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 653)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 654) bip->bli_flags |= XFS_BLI_INODE_ALLOC_BUF;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 655) bp->b_flags |= _XBF_INODES;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 656) xfs_trans_buf_set_type(tp, bp, XFS_BLFT_DINO_BUF);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 657) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 658)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 659) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 660) * Mark the buffer as ordered for this transaction. This means that the contents
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 661) * of the buffer are not recorded in the transaction but it is tracked in the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 662) * AIL as though it was. This allows us to record logical changes in
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 663) * transactions rather than the physical changes we make to the buffer without
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 664) * changing writeback ordering constraints of metadata buffers.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 665) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 666) bool
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 667) xfs_trans_ordered_buf(
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 668) struct xfs_trans *tp,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 669) struct xfs_buf *bp)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 670) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 671) struct xfs_buf_log_item *bip = bp->b_log_item;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 672)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 673) ASSERT(bp->b_transp == tp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 674) ASSERT(bip != NULL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 675) ASSERT(atomic_read(&bip->bli_refcount) > 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 676)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 677) if (xfs_buf_item_dirty_format(bip))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 678) return false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 679)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 680) bip->bli_flags |= XFS_BLI_ORDERED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 681) trace_xfs_buf_item_ordered(bip);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 682)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 683) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 684) * We don't log a dirty range of an ordered buffer but it still needs
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 685) * to be marked dirty and that it has been logged.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 686) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 687) xfs_trans_dirty_buf(tp, bp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 688) return true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 689) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 690)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 691) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 692) * Set the type of the buffer for log recovery so that it can correctly identify
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 693) * and hence attach the correct buffer ops to the buffer after replay.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 694) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 695) void
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 696) xfs_trans_buf_set_type(
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 697) struct xfs_trans *tp,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 698) struct xfs_buf *bp,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 699) enum xfs_blft type)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 700) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 701) struct xfs_buf_log_item *bip = bp->b_log_item;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 702)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 703) if (!tp)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 704) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 705)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 706) ASSERT(bp->b_transp == tp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 707) ASSERT(bip != NULL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 708) ASSERT(atomic_read(&bip->bli_refcount) > 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 709)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 710) xfs_blft_to_flags(&bip->__bli_format, type);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 711) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 712)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 713) void
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 714) xfs_trans_buf_copy_type(
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 715) struct xfs_buf *dst_bp,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 716) struct xfs_buf *src_bp)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 717) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 718) struct xfs_buf_log_item *sbip = src_bp->b_log_item;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 719) struct xfs_buf_log_item *dbip = dst_bp->b_log_item;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 720) enum xfs_blft type;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 721)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 722) type = xfs_blft_from_flags(&sbip->__bli_format);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 723) xfs_blft_to_flags(&dbip->__bli_format, type);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 724) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 725)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 726) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 727) * Similar to xfs_trans_inode_buf(), this marks the buffer as a cluster of
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 728) * dquots. However, unlike in inode buffer recovery, dquot buffers get
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 729) * recovered in their entirety. (Hence, no XFS_BLI_DQUOT_ALLOC_BUF flag).
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 730) * The only thing that makes dquot buffers different from regular
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 731) * buffers is that we must not replay dquot bufs when recovering
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 732) * if a _corresponding_ quotaoff has happened. We also have to distinguish
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 733) * between usr dquot bufs and grp dquot bufs, because usr and grp quotas
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 734) * can be turned off independently.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 735) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 736) /* ARGSUSED */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 737) void
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 738) xfs_trans_dquot_buf(
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 739) xfs_trans_t *tp,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 740) xfs_buf_t *bp,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 741) uint type)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 742) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 743) struct xfs_buf_log_item *bip = bp->b_log_item;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 744)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 745) ASSERT(type == XFS_BLF_UDQUOT_BUF ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 746) type == XFS_BLF_PDQUOT_BUF ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 747) type == XFS_BLF_GDQUOT_BUF);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 748)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 749) bip->__bli_format.blf_flags |= type;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 750)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 751) switch (type) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 752) case XFS_BLF_UDQUOT_BUF:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 753) type = XFS_BLFT_UDQUOT_BUF;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 754) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 755) case XFS_BLF_PDQUOT_BUF:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 756) type = XFS_BLFT_PDQUOT_BUF;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 757) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 758) case XFS_BLF_GDQUOT_BUF:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 759) type = XFS_BLFT_GDQUOT_BUF;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 760) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 761) default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 762) type = XFS_BLFT_UNKNOWN_BUF;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 763) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 764) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 765)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 766) bp->b_flags |= _XBF_DQUOTS;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 767) xfs_trans_buf_set_type(tp, bp, type);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 768) }