// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (c) 2000-2005 Silicon Graphics, Inc.
 * All Rights Reserved.
 */
#include "xfs.h"
#include "xfs_fs.h"
#include "xfs_shared.h"
#include "xfs_format.h"
#include "xfs_log_format.h"
#include "xfs_trans_resv.h"
#include "xfs_bit.h"
#include "xfs_mount.h"
#include "xfs_trans.h"
#include "xfs_trans_priv.h"
#include "xfs_buf_item.h"
#include "xfs_inode.h"
#include "xfs_inode_item.h"
#include "xfs_quota.h"
#include "xfs_dquot_item.h"
#include "xfs_dquot.h"
#include "xfs_trace.h"
#include "xfs_log.h"

kmem_zone_t     *xfs_buf_item_zone;

static inline struct xfs_buf_log_item *BUF_ITEM(struct xfs_log_item *lip)
{
        return container_of(lip, struct xfs_buf_log_item, bli_item);
}

/* Is this log iovec plausibly large enough to contain the buffer log format? */
bool
xfs_buf_log_check_iovec(
        struct xfs_log_iovec            *iovec)
{
        struct xfs_buf_log_format       *blfp = iovec->i_addr;
        char                            *bmp_end;
        char                            *item_end;

        if (offsetof(struct xfs_buf_log_format, blf_data_map) > iovec->i_len)
                return false;

        item_end = (char *)iovec->i_addr + iovec->i_len;
        bmp_end = (char *)&blfp->blf_data_map[blfp->blf_map_size];
        return bmp_end <= item_end;
}
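
/*
 * Worked example with hypothetical numbers: an iovec with i_len = 32 whose
 * format claims blf_map_size = 16 would need 16 * sizeof(blf_data_map[0]) =
 * 64 bytes of bitmap beyond the fixed header, so bmp_end lands well past
 * item_end and the iovec is rejected as too short for its own bitmap.
 */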

static inline int
xfs_buf_log_format_size(
        struct xfs_buf_log_format *blfp)
{
        return offsetof(struct xfs_buf_log_format, blf_data_map) +
                        (blfp->blf_map_size * sizeof(blfp->blf_data_map[0]));
}
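
/*
 * For illustration: a single-word dirty bitmap (blf_map_size = 1) yields the
 * fixed header plus one bitmap word. This is the on-disk footprint, not
 * sizeof() of the in-core structure, which always reserves the full
 * XFS_BLF_DATAMAP_SIZE words.
 */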

/*
 * Return the number of log iovecs and the space needed to log the given
 * buf log item segment.
 *
 * It calculates this as 1 iovec for the buf log format structure and 1 for
 * each stretch of non-contiguous chunks to be logged. Contiguous chunks are
 * logged in a single iovec. The stale (XFS_BLI_STALE) case is handled by the
 * caller, which never gets here with an empty bitmap.
 */
STATIC void
xfs_buf_item_size_segment(
        struct xfs_buf_log_item *bip,
        struct xfs_buf_log_format *blfp,
        int                     *nvecs,
        int                     *nbytes)
{
        struct xfs_buf          *bp = bip->bli_buf;
        int                     next_bit;
        int                     last_bit;

        last_bit = xfs_next_bit(blfp->blf_data_map, blfp->blf_map_size, 0);
        if (last_bit == -1)
                return;

        /*
         * initial count for a dirty buffer is 2 vectors - the format structure
         * and the first dirty region.
         */
        *nvecs += 2;
        *nbytes += xfs_buf_log_format_size(blfp) + XFS_BLF_CHUNK;

        while (last_bit != -1) {
                /*
                 * This takes the bit number to start looking from and
                 * returns the next set bit from there.  It returns -1
                 * if there are no more bits set or the start bit is
                 * beyond the end of the bitmap.
                 */
                next_bit = xfs_next_bit(blfp->blf_data_map, blfp->blf_map_size,
                                        last_bit + 1);
                /*
                 * If we run out of bits, leave the loop; else if we find a
                 * new set of bits, bump the number of vecs; else keep
                 * scanning the current set of bits.
                 */
                if (next_bit == -1) {
                        break;
                } else if (next_bit != last_bit + 1) {
                        last_bit = next_bit;
                        (*nvecs)++;
                } else if (xfs_buf_offset(bp, next_bit * XFS_BLF_CHUNK) !=
                           (xfs_buf_offset(bp, last_bit * XFS_BLF_CHUNK) +
                            XFS_BLF_CHUNK)) {
                        last_bit = next_bit;
                        (*nvecs)++;
                } else {
                        last_bit++;
                }
                *nbytes += XFS_BLF_CHUNK;
        }
}
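
/*
 * Worked example (hypothetical bitmap): with XFS_BLF_CHUNK = 128 and a data
 * map of ...0001101b (bits 0, 2 and 3 set), the first set bit charges the
 * format header plus one chunk (*nvecs += 2). Bit 2 is not adjacent to bit 0,
 * so it opens a new region (*nvecs becomes 3); bit 3 merely extends that
 * region. Each loop pass that finds a bit adds another XFS_BLF_CHUNK, giving
 * 3 iovecs and xfs_buf_log_format_size() + 3 * 128 bytes for this segment.
 */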

/*
 * This returns the number of log iovecs needed to log the given buf log item.
 *
 * It calculates this as 1 iovec for the buf log format structure and 1 for each
 * stretch of non-contiguous chunks to be logged.  Contiguous chunks are logged
 * in a single iovec.
 *
 * Discontiguous buffers need a format structure per region that is being
 * logged. This makes the changes in the buffer appear to log recovery as though
 * they came from separate buffers, just like would occur if multiple buffers
 * were used instead of a single discontiguous buffer. This enables
 * discontiguous buffers to be in-memory constructs, completely transparent to
 * what ends up on disk.
 *
 * If the XFS_BLI_STALE flag has been set, then log nothing but the buf log
 * format structures.
 */
STATIC void
xfs_buf_item_size(
        struct xfs_log_item     *lip,
        int                     *nvecs,
        int                     *nbytes)
{
        struct xfs_buf_log_item *bip = BUF_ITEM(lip);
        int                     i;

        ASSERT(atomic_read(&bip->bli_refcount) > 0);
        if (bip->bli_flags & XFS_BLI_STALE) {
                /*
                 * The buffer is stale, so all we need to log
                 * is the buf log format structure with the
                 * cancel flag in it.
                 */
                trace_xfs_buf_item_size_stale(bip);
                ASSERT(bip->__bli_format.blf_flags & XFS_BLF_CANCEL);
                *nvecs += bip->bli_format_count;
                for (i = 0; i < bip->bli_format_count; i++) {
                        *nbytes += xfs_buf_log_format_size(&bip->bli_formats[i]);
                }
                return;
        }

        ASSERT(bip->bli_flags & XFS_BLI_LOGGED);

        if (bip->bli_flags & XFS_BLI_ORDERED) {
                /*
                 * The buffer has been logged just to order it.
                 * It is not being included in the transaction
                 * commit, so no vectors are used at all.
                 */
                trace_xfs_buf_item_size_ordered(bip);
                *nvecs = XFS_LOG_VEC_ORDERED;
                return;
        }

        /*
         * The vector count is based on the number of buffer vectors we have
         * dirty bits in. This will only be greater than one when we have a
         * compound buffer with more than one segment dirty. Hence for compound
         * buffers we need to track which segment the dirty bits correspond to,
         * and when we move from one segment to the next increment the vector
         * count for the extra buf log format structure that will need to be
         * written.
         */
        for (i = 0; i < bip->bli_format_count; i++) {
                xfs_buf_item_size_segment(bip, &bip->bli_formats[i],
                                          nvecs, nbytes);
        }
        trace_xfs_buf_item_size(bip);
}

static inline void
xfs_buf_item_copy_iovec(
        struct xfs_log_vec      *lv,
        struct xfs_log_iovec    **vecp,
        struct xfs_buf          *bp,
        uint                    offset,
        int                     first_bit,
        uint                    nbits)
{
        offset += first_bit * XFS_BLF_CHUNK;
        xlog_copy_iovec(lv, vecp, XLOG_REG_TYPE_BCHUNK,
                        xfs_buf_offset(bp, offset),
                        nbits * XFS_BLF_CHUNK);
}
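
/*
 * For instance (hypothetical values): first_bit = 2 and nbits = 3 copy
 * 3 * XFS_BLF_CHUNK = 384 bytes into the log vector, starting 256 bytes
 * into this segment of the buffer.
 */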

static inline bool
xfs_buf_item_straddle(
        struct xfs_buf          *bp,
        uint                    offset,
        int                     next_bit,
        int                     last_bit)
{
        return xfs_buf_offset(bp, offset + (next_bit << XFS_BLF_SHIFT)) !=
                (xfs_buf_offset(bp, offset + (last_bit << XFS_BLF_SHIFT)) +
                 XFS_BLF_CHUNK);
}
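
/*
 * Why this check exists: a multi-page buffer is only virtually contiguous,
 * so two chunks that are adjacent in the dirty bitmap may live in different
 * pages. As an illustration, assuming 4k pages and 128-byte chunks, chunk 31
 * ends one page and chunk 32 starts the next; if those pages are not adjacent
 * in memory, xfs_buf_offset() returns non-consecutive addresses and the run
 * must be split into separate copy regions.
 */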

static void
xfs_buf_item_format_segment(
        struct xfs_buf_log_item *bip,
        struct xfs_log_vec      *lv,
        struct xfs_log_iovec    **vecp,
        uint                    offset,
        struct xfs_buf_log_format *blfp)
{
        struct xfs_buf          *bp = bip->bli_buf;
        uint                    base_size;
        int                     first_bit;
        int                     last_bit;
        int                     next_bit;
        uint                    nbits;

        /* copy the flags across from the base format item */
        blfp->blf_flags = bip->__bli_format.blf_flags;

        /*
         * Base size is the actual size of the ondisk structure - it reflects
         * the actual size of the dirty bitmap rather than the size of the in
         * memory structure.
         */
        base_size = xfs_buf_log_format_size(blfp);

        first_bit = xfs_next_bit(blfp->blf_data_map, blfp->blf_map_size, 0);
        if (!(bip->bli_flags & XFS_BLI_STALE) && first_bit == -1) {
                /*
                 * If the map is not dirty in the transaction, there is
                 * nothing to log, so return without advancing the vector
                 * pointer.
                 */
                return;
        }

        blfp = xlog_copy_iovec(lv, vecp, XLOG_REG_TYPE_BFORMAT, blfp, base_size);
        blfp->blf_size = 1;

        if (bip->bli_flags & XFS_BLI_STALE) {
                /*
                 * The buffer is stale, so all we need to log
                 * is the buf log format structure with the
                 * cancel flag in it.
                 */
                trace_xfs_buf_item_format_stale(bip);
                ASSERT(blfp->blf_flags & XFS_BLF_CANCEL);
                return;
        }

        /*
         * Fill in an iovec for each set of contiguous chunks.
         */
        last_bit = first_bit;
        nbits = 1;
        for (;;) {
                /*
                 * This takes the bit number to start looking from and
                 * returns the next set bit from there.  It returns -1
                 * if there are no more bits set or the start bit is
                 * beyond the end of the bitmap.
                 */
                next_bit = xfs_next_bit(blfp->blf_data_map, blfp->blf_map_size,
                                        (uint)last_bit + 1);
                /*
                 * If we run out of bits fill in the last iovec and get out of
                 * the loop.  Else if we start a new set of bits then fill in
                 * the iovec for the series we were looking at and start
                 * counting the bits in the new one.  Else we're still in the
                 * same set of bits so just keep counting and scanning.
                 */
                if (next_bit == -1) {
                        xfs_buf_item_copy_iovec(lv, vecp, bp, offset,
                                                first_bit, nbits);
                        blfp->blf_size++;
                        break;
                } else if (next_bit != last_bit + 1 ||
                           xfs_buf_item_straddle(bp, offset, next_bit, last_bit)) {
                        xfs_buf_item_copy_iovec(lv, vecp, bp, offset,
                                                first_bit, nbits);
                        blfp->blf_size++;
                        first_bit = next_bit;
                        last_bit = next_bit;
                        nbits = 1;
                } else {
                        last_bit++;
                        nbits++;
                }
        }
}

/*
 * This is called to fill in the vector of log iovecs for the
 * given log buf item.  It fills the first entry with a buf log
 * format structure, and the rest point to contiguous chunks
 * within the buffer.
 */
STATIC void
xfs_buf_item_format(
        struct xfs_log_item     *lip,
        struct xfs_log_vec      *lv)
{
        struct xfs_buf_log_item *bip = BUF_ITEM(lip);
        struct xfs_buf          *bp = bip->bli_buf;
        struct xfs_log_iovec    *vecp = NULL;
        uint                    offset = 0;
        int                     i;

        ASSERT(atomic_read(&bip->bli_refcount) > 0);
        ASSERT((bip->bli_flags & XFS_BLI_LOGGED) ||
               (bip->bli_flags & XFS_BLI_STALE));
        ASSERT((bip->bli_flags & XFS_BLI_STALE) ||
               (xfs_blft_from_flags(&bip->__bli_format) > XFS_BLFT_UNKNOWN_BUF &&
                xfs_blft_from_flags(&bip->__bli_format) < XFS_BLFT_MAX_BUF));
        ASSERT(!(bip->bli_flags & XFS_BLI_ORDERED) ||
               (bip->bli_flags & XFS_BLI_STALE));

        /*
         * If it is an inode buffer, transfer the in-memory state to the
         * format flags and clear the in-memory state.
         *
         * For buffer based inode allocation, we do not transfer
         * this state if the inode buffer allocation has not yet been committed
         * to the log as setting the XFS_BLI_INODE_BUF flag will prevent
         * correct replay of the inode allocation.
         *
         * For icreate item based inode allocation, the buffers aren't written
         * to the journal during allocation, and hence we should always tag the
         * buffer as an inode buffer so that the correct unlinked list replay
         * occurs during recovery.
         */
        if (bip->bli_flags & XFS_BLI_INODE_BUF) {
                if (xfs_sb_version_has_v3inode(&lip->li_mountp->m_sb) ||
                    !((bip->bli_flags & XFS_BLI_INODE_ALLOC_BUF) &&
                      xfs_log_item_in_current_chkpt(lip)))
                        bip->__bli_format.blf_flags |= XFS_BLF_INODE_BUF;
                bip->bli_flags &= ~XFS_BLI_INODE_BUF;
        }

        for (i = 0; i < bip->bli_format_count; i++) {
                xfs_buf_item_format_segment(bip, lv, &vecp, offset,
                                            &bip->bli_formats[i]);
                offset += BBTOB(bp->b_maps[i].bm_len);
        }

        /*
         * Check to make sure everything is consistent.
         */
        trace_xfs_buf_item_format(bip);
}

/*
 * This is called to pin the buffer associated with the buf log item in memory
 * so it cannot be written out.
 *
 * We also always take a reference to the buffer log item here so that the bli
 * is held while the item is pinned in memory. This means that we can
 * unconditionally drop the reference count a transaction holds when the
 * transaction is completed.
 */
STATIC void
xfs_buf_item_pin(
        struct xfs_log_item     *lip)
{
        struct xfs_buf_log_item *bip = BUF_ITEM(lip);

        ASSERT(atomic_read(&bip->bli_refcount) > 0);
        ASSERT((bip->bli_flags & XFS_BLI_LOGGED) ||
               (bip->bli_flags & XFS_BLI_ORDERED) ||
               (bip->bli_flags & XFS_BLI_STALE));

        trace_xfs_buf_item_pin(bip);

        atomic_inc(&bip->bli_refcount);
        atomic_inc(&bip->bli_buf->b_pin_count);
}

/*
 * This is called to unpin the buffer associated with the buf log
 * item which was previously pinned with a call to xfs_buf_item_pin().
 *
 * Also drop the reference to the buf item for the current transaction.
 * If the XFS_BLI_STALE flag is set and we are the last reference,
 * then free up the buf log item and unlock the buffer.
 *
 * If the remove flag is set we are called from uncommit in the
 * forced-shutdown path.  If that is true and the reference count on
 * the log item is going to drop to zero we need to free the item's
 * descriptor in the transaction.
 */
STATIC void
xfs_buf_item_unpin(
        struct xfs_log_item     *lip,
        int                     remove)
{
        struct xfs_buf_log_item *bip = BUF_ITEM(lip);
        struct xfs_buf          *bp = bip->bli_buf;
        int                     stale = bip->bli_flags & XFS_BLI_STALE;
        int                     freed;

        ASSERT(bp->b_log_item == bip);
        ASSERT(atomic_read(&bip->bli_refcount) > 0);

        trace_xfs_buf_item_unpin(bip);

        freed = atomic_dec_and_test(&bip->bli_refcount);

        if (atomic_dec_and_test(&bp->b_pin_count))
                wake_up_all(&bp->b_waiters);

        if (freed && stale) {
                ASSERT(bip->bli_flags & XFS_BLI_STALE);
                ASSERT(xfs_buf_islocked(bp));
                ASSERT(bp->b_flags & XBF_STALE);
                ASSERT(bip->__bli_format.blf_flags & XFS_BLF_CANCEL);

                trace_xfs_buf_item_unpin_stale(bip);

                if (remove) {
                        /*
                         * If we are in a transaction context, we have to
                         * remove the log item from the transaction as we are
                         * about to release our reference to the buffer.  If we
                         * don't, the unlock that occurs later in
                         * xfs_trans_uncommit() will try to reference the
                         * buffer which we no longer have a hold on.
                         */
                        if (!list_empty(&lip->li_trans))
                                xfs_trans_del_item(lip);

                        /*
                         * Since the transaction no longer refers to the buffer,
                         * the buffer should no longer refer to the transaction.
                         */
                        bp->b_transp = NULL;
                }

                /*
                 * If we get called here because of an IO error, we may or may
                 * not have the item on the AIL.  xfs_trans_ail_delete() will
                 * take care of that situation.  xfs_trans_ail_delete() drops
                 * the AIL lock.
                 */
                if (bip->bli_flags & XFS_BLI_STALE_INODE) {
                        xfs_buf_item_done(bp);
                        xfs_buf_inode_iodone(bp);
                        ASSERT(list_empty(&bp->b_li_list));
                } else {
                        xfs_trans_ail_delete(lip, SHUTDOWN_LOG_IO_ERROR);
                        xfs_buf_item_relse(bp);
                        ASSERT(bp->b_log_item == NULL);
                }
                xfs_buf_relse(bp);
        } else if (freed && remove) {
                /*
                 * The buffer must be locked and held by the caller to simulate
                 * an async I/O failure.
                 */
                xfs_buf_lock(bp);
                xfs_buf_hold(bp);
                bp->b_flags |= XBF_ASYNC;
                xfs_buf_ioend_fail(bp);
        }
}

STATIC uint
xfs_buf_item_push(
        struct xfs_log_item     *lip,
        struct list_head        *buffer_list)
{
        struct xfs_buf_log_item *bip = BUF_ITEM(lip);
        struct xfs_buf          *bp = bip->bli_buf;
        uint                    rval = XFS_ITEM_SUCCESS;

        if (xfs_buf_ispinned(bp))
                return XFS_ITEM_PINNED;
        if (!xfs_buf_trylock(bp)) {
                /*
                 * If we have just raced with a buffer being pinned and it has
                 * been marked stale, we could end up stalling until someone
                 * else issues a log force to unpin the stale buffer.  Check
                 * for the race condition here so xfsaild recognizes the
                 * buffer is pinned and queues a log force to move it along.
                 */
                if (xfs_buf_ispinned(bp))
                        return XFS_ITEM_PINNED;
                return XFS_ITEM_LOCKED;
        }

        ASSERT(!(bip->bli_flags & XFS_BLI_STALE));

        trace_xfs_buf_item_push(bip);

        /* has a previous flush failed due to IO errors? */
        if (bp->b_flags & XBF_WRITE_FAIL) {
                xfs_buf_alert_ratelimited(bp, "XFS: Failing async write",
                        "Failing async write on buffer block 0x%llx. Retrying async write.",
                        (long long)bp->b_bn);
        }

        if (!xfs_buf_delwri_queue(bp, buffer_list))
                rval = XFS_ITEM_FLUSHING;
        xfs_buf_unlock(bp);
        return rval;
}

/*
 * Drop the buffer log item refcount and take appropriate action. This helper
 * determines whether the bli must be freed or not, since a decrement to zero
 * does not necessarily mean the bli is unused.
 *
 * Return true if the bli is freed, false otherwise.
 */
bool
xfs_buf_item_put(
        struct xfs_buf_log_item *bip)
{
        struct xfs_log_item     *lip = &bip->bli_item;
        bool                    aborted;
        bool                    dirty;

        /* drop the bli ref and return if it wasn't the last one */
        if (!atomic_dec_and_test(&bip->bli_refcount))
                return false;

        /*
         * We dropped the last ref and must free the item if clean or aborted.
         * If the bli is dirty and non-aborted, the buffer was clean in the
         * transaction but still awaiting writeback from previous changes. In
         * that case, the bli is freed on buffer writeback completion.
         */
        aborted = test_bit(XFS_LI_ABORTED, &lip->li_flags) ||
                  XFS_FORCED_SHUTDOWN(lip->li_mountp);
        dirty = bip->bli_flags & XFS_BLI_DIRTY;
        if (dirty && !aborted)
                return false;

        /*
         * The bli is aborted or clean. An aborted item may be in the AIL
         * regardless of dirty state.  For example, consider an aborted
         * transaction that invalidated a dirty bli and cleared the dirty
         * state.
         */
        if (aborted)
                xfs_trans_ail_delete(lip, 0);
        xfs_buf_item_relse(bip->bli_buf);
        return true;
}

/*
 * Release the buffer associated with the buf log item.  If there is no dirty
 * logged data associated with the buffer recorded in the buf log item, then
 * free the buf log item and remove the reference to it in the buffer.
 *
 * This call ignores the recursion count.  It is only called when the buffer
 * should REALLY be unlocked, regardless of the recursion count.
 *
 * We unconditionally drop the transaction's reference to the log item. If the
 * item was logged, then another reference was taken when it was pinned, so we
 * can safely drop the transaction reference now.  This also allows us to avoid
 * potential races with the unpin code freeing the bli by not referencing the
 * bli after we've dropped the reference count.
 *
 * If the XFS_BLI_HOLD flag is set in the buf log item, then free the log item
 * if necessary but do not unlock the buffer.  This is for support of
 * xfs_trans_bhold(). Make sure the XFS_BLI_HOLD field is cleared if we don't
 * free the item.
 */
STATIC void
xfs_buf_item_release(
        struct xfs_log_item     *lip)
{
        struct xfs_buf_log_item *bip = BUF_ITEM(lip);
        struct xfs_buf          *bp = bip->bli_buf;
        bool                    released;
        bool                    hold = bip->bli_flags & XFS_BLI_HOLD;
        bool                    stale = bip->bli_flags & XFS_BLI_STALE;
#if defined(DEBUG) || defined(XFS_WARN)
        bool                    ordered = bip->bli_flags & XFS_BLI_ORDERED;
        bool                    dirty = bip->bli_flags & XFS_BLI_DIRTY;
        bool                    aborted = test_bit(XFS_LI_ABORTED,
                                                   &lip->li_flags);
#endif

        trace_xfs_buf_item_release(bip);

        /*
         * The bli dirty state should match whether the blf has logged segments
         * except for ordered buffers, where only the bli should be dirty.
         */
        ASSERT((!ordered && dirty == xfs_buf_item_dirty_format(bip)) ||
               (ordered && dirty && !xfs_buf_item_dirty_format(bip)));
        ASSERT(!stale || (bip->__bli_format.blf_flags & XFS_BLF_CANCEL));

        /*
         * Clear the buffer's association with this transaction and
         * per-transaction state from the bli, which has been copied above.
         */
        bp->b_transp = NULL;
        bip->bli_flags &= ~(XFS_BLI_LOGGED | XFS_BLI_HOLD | XFS_BLI_ORDERED);

        /*
         * Unref the item and unlock the buffer unless held or stale. Stale
         * buffers remain locked until final unpin unless the bli is freed by
         * the unref call.  The latter implies shutdown because buffer
         * invalidation dirties the bli and transaction.
         */
        released = xfs_buf_item_put(bip);
        if (hold || (stale && !released))
                return;
        ASSERT(!stale || aborted);
        xfs_buf_relse(bp);
}

STATIC void
xfs_buf_item_committing(
        struct xfs_log_item     *lip,
        xfs_lsn_t               commit_lsn)
{
        return xfs_buf_item_release(lip);
}

/*
 * This is called to find out where the oldest active copy of the
 * buf log item in the on disk log resides now that the last log
 * write of it completed at the given lsn.
 * We always re-log all the dirty data in a buffer, so usually the
 * latest copy in the on disk log is the only one that matters.  For
 * those cases we simply return the given lsn.
 *
 * The one exception to this is for buffers full of newly allocated
 * inodes.  These buffers are only relogged with the XFS_BLI_INODE_BUF
 * flag set, indicating that only the di_next_unlinked fields from the
 * inodes in the buffers will be replayed during recovery.  If the
 * original newly allocated inode images have not yet been flushed
 * when the buffer is so relogged, then we need to make sure that we
 * keep the old images in the 'active' portion of the log.  We do this
 * by returning the original lsn of that transaction here rather than
 * the current one.
 */
STATIC xfs_lsn_t
xfs_buf_item_committed(
        struct xfs_log_item     *lip,
        xfs_lsn_t               lsn)
{
        struct xfs_buf_log_item *bip = BUF_ITEM(lip);

        trace_xfs_buf_item_committed(bip);

        if ((bip->bli_flags & XFS_BLI_INODE_ALLOC_BUF) && lip->li_lsn != 0)
                return lip->li_lsn;
        return lsn;
}
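
/*
 * Hypothetical illustration: an inode allocation buffer first committed at
 * LSN 100 sits in the AIL with li_lsn = 100.  If it is later relogged and
 * that commit completes at LSN 200, returning li_lsn (100) rather than 200
 * keeps the item -- and therefore the log tail -- pinned at the original
 * allocation record until the inode images are flushed to disk.
 */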

static const struct xfs_item_ops xfs_buf_item_ops = {
        .iop_size       = xfs_buf_item_size,
        .iop_format     = xfs_buf_item_format,
        .iop_pin        = xfs_buf_item_pin,
        .iop_unpin      = xfs_buf_item_unpin,
        .iop_release    = xfs_buf_item_release,
        .iop_committing = xfs_buf_item_committing,
        .iop_committed  = xfs_buf_item_committed,
        .iop_push       = xfs_buf_item_push,
};

STATIC void
xfs_buf_item_get_format(
        struct xfs_buf_log_item *bip,
        int                     count)
{
        ASSERT(bip->bli_formats == NULL);
        bip->bli_format_count = count;

        if (count == 1) {
                bip->bli_formats = &bip->__bli_format;
                return;
        }

        bip->bli_formats = kmem_zalloc(count * sizeof(struct xfs_buf_log_format),
                                       0);
}

STATIC void
xfs_buf_item_free_format(
        struct xfs_buf_log_item *bip)
{
        if (bip->bli_formats != &bip->__bli_format) {
                kmem_free(bip->bli_formats);
                bip->bli_formats = NULL;
        }
}

/*
 * Allocate a new buf log item to go with the given buffer.
 * Set the buffer's b_log_item field to point to the new
 * buf log item.
 */
int
xfs_buf_item_init(
        struct xfs_buf          *bp,
        struct xfs_mount        *mp)
{
        struct xfs_buf_log_item *bip = bp->b_log_item;
        int                     chunks;
        int                     map_size;
        int                     i;

        /*
         * Check to see if there is already a buf log item for
         * this buffer. If we do already have one, there is
         * nothing to do here so return.
         */
        ASSERT(bp->b_mount == mp);
        if (bip) {
                ASSERT(bip->bli_item.li_type == XFS_LI_BUF);
                ASSERT(!bp->b_transp);
                ASSERT(bip->bli_buf == bp);
                return 0;
        }

        bip = kmem_cache_zalloc(xfs_buf_item_zone, GFP_KERNEL | __GFP_NOFAIL);
        xfs_log_item_init(mp, &bip->bli_item, XFS_LI_BUF, &xfs_buf_item_ops);
        bip->bli_buf = bp;

        /*
         * chunks is the number of XFS_BLF_CHUNK size pieces the buffer
         * can be divided into. Make sure not to truncate any pieces.
         * map_size is the size of the bitmap needed to describe the
         * chunks of the buffer.
         *
         * Discontiguous buffer support follows the layout of the underlying
         * buffer. This makes the implementation as simple as possible.
         */
        xfs_buf_item_get_format(bip, bp->b_map_count);

        for (i = 0; i < bip->bli_format_count; i++) {
                chunks = DIV_ROUND_UP(BBTOB(bp->b_maps[i].bm_len),
                                      XFS_BLF_CHUNK);
                map_size = DIV_ROUND_UP(chunks, NBWORD);

                if (map_size > XFS_BLF_DATAMAP_SIZE) {
                        /*
                         * Free any multi-segment format array as well as the
                         * bli itself so this error path does not leak it.
                         */
                        xfs_buf_item_free_format(bip);
                        kmem_cache_free(xfs_buf_item_zone, bip);
                        xfs_err(mp,
        "buffer item dirty bitmap (%u uints) too small to reflect %u bytes!",
                                map_size,
                                BBTOB(bp->b_maps[i].bm_len));
                        return -EFSCORRUPTED;
                }

                bip->bli_formats[i].blf_type = XFS_LI_BUF;
                bip->bli_formats[i].blf_blkno = bp->b_maps[i].bm_bn;
                bip->bli_formats[i].blf_len = bp->b_maps[i].bm_len;
                bip->bli_formats[i].blf_map_size = map_size;
        }

        bp->b_log_item = bip;
        xfs_buf_hold(bp);
        return 0;
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 778)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 779)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 780) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 781) * Mark bytes first through last inclusive as dirty in the buf
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 782) * item's bitmap.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 783) */
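/*
 * Illustrative walk-through of the routine below (editorial example,
 * assuming XFS_BLF_SHIFT == 7, i.e. 128-byte chunks, and NBWORD == 32):
 * logging bytes 100..5000 gives first_bit = 100 >> 7 = 0 and
 * last_bit = 5000 >> 7 = 39, so bits_to_set = 40. Bits 0-31 fill
 * word 0 in the whole-word loop and bits 32-39 set the low 8 bits of
 * word 1 in the final partial-word step.
 */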
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 784) static void
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 785) xfs_buf_item_log_segment(
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 786) uint first,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 787) uint last,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 788) uint *map)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 789) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 790) uint first_bit;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 791) uint last_bit;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 792) uint bits_to_set;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 793) uint bits_set;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 794) uint word_num;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 795) uint *wordp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 796) uint bit;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 797) uint end_bit;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 798) uint mask;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 799)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 800) ASSERT(first < XFS_BLF_DATAMAP_SIZE * XFS_BLF_CHUNK * NBWORD);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 801) ASSERT(last < XFS_BLF_DATAMAP_SIZE * XFS_BLF_CHUNK * NBWORD);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 802)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 803) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 804) * Convert byte offsets to bit numbers.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 805) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 806) first_bit = first >> XFS_BLF_SHIFT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 807) last_bit = last >> XFS_BLF_SHIFT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 808)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 809) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 810) * Calculate the total number of bits to be set.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 811) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 812) bits_to_set = last_bit - first_bit + 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 813)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 814) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 815) * Get a pointer to the first word in the bitmap
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 816) * to set a bit in.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 817) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 818) word_num = first_bit >> BIT_TO_WORD_SHIFT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 819) wordp = &map[word_num];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 820)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 821) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 822) * Calculate the starting bit in the first word.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 823) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 824) bit = first_bit & (uint)(NBWORD - 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 825)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 826) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 827) * First set any bits in the first word of our range. If the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 828) * range starts at bit 0 of a word, that word is handled by the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 829) * whole-word loop below rather than here; the variable bit tells
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 830) * us which case applies. bits_set tracks the number of bits that
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 831) * have been set so far, and end_bit is the number of the last
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 832) * bit to be set in this word plus one.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 833) */
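/*
 * Example with assumed values (editorial, not from the original
 * source): with bit == 3 and bits_to_set == 10, end_bit becomes 13
 * and mask = ((1U << 10) - 1) << 3 == 0x1ff8, which sets bits 3-12
 * of the first word.
 */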
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 834) if (bit) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 835) end_bit = min(bit + bits_to_set, (uint)NBWORD);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 836) mask = ((1U << (end_bit - bit)) - 1) << bit;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 837) *wordp |= mask;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 838) wordp++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 839) bits_set = end_bit - bit;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 840) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 841) bits_set = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 842) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 843)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 844) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 845) * Now set bits a whole word at a time that are between
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 846) * first_bit and last_bit.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 847) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 848) while ((bits_to_set - bits_set) >= NBWORD) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 849) *wordp = 0xffffffff;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 850) bits_set += NBWORD;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 851) wordp++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 852) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 853)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 854) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 855) * Finally, set any bits left to be set in one last partial word.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 856) */
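/*
 * Example (editorial): if 8 bits remain, end_bit == 8 and
 * mask = (1U << 8) - 1 == 0xff, setting bits 0-7 of the last word.
 */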
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 857) end_bit = bits_to_set - bits_set;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 858) if (end_bit) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 859) mask = (1U << end_bit) - 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 860) *wordp |= mask;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 861) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 862) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 863)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 864) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 865) * Mark bytes first through last inclusive as dirty in the buf
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 866) * item's bitmap.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 867) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 868) void
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 869) xfs_buf_item_log(
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 870) struct xfs_buf_log_item *bip,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 871) uint first,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 872) uint last)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 873) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 874) int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 875) uint start;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 876) uint end;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 877) struct xfs_buf *bp = bip->bli_buf;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 878)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 879) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 880) * Walk each buffer segment and mark it dirty appropriately.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 881) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 882) start = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 883) for (i = 0; i < bip->bli_format_count; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 884) if (start > last)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 885) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 886) end = start + BBTOB(bp->b_maps[i].bm_len) - 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 887)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 888) /* skip to the map that includes the first byte to log */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 889) if (first > end) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 890) start += BBTOB(bp->b_maps[i].bm_len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 891) continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 892) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 893)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 894) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 895) * Trim the range to this segment and mark it in the bitmap.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 896) * Note that we must convert buffer offsets to segment relative
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 897) * offsets (e.g., the first byte of each segment is byte 0 of
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 898) * that segment).
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 899) */
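/*
 * Example of the trimming below (editorial, assuming two 4096-byte
 * segments): logging buffer bytes 4000..4200 marks bytes 4000..4095
 * in segment 0's bitmap and, on the next iteration, bytes 0..104 in
 * segment 1's bitmap (4096..4200 made segment-relative).
 */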
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 900) if (first < start)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 901) first = start;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 902) if (end > last)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 903) end = last;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 904) xfs_buf_item_log_segment(first - start, end - start,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 905) &bip->bli_formats[i].blf_data_map[0]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 906)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 907) start += BBTOB(bp->b_maps[i].bm_len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 908) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 909) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 910)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 911)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 912) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 913) * Return true if the buffer has any ranges logged/dirtied by a transaction,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 914) * false otherwise.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 915) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 916) bool
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 917) xfs_buf_item_dirty_format(
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 918) struct xfs_buf_log_item *bip)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 919) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 920) int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 921)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 922) for (i = 0; i < bip->bli_format_count; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 923) if (!xfs_bitmap_empty(bip->bli_formats[i].blf_data_map,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 924) bip->bli_formats[i].blf_map_size))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 925) return true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 926) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 927)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 928) return false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 929) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 930)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 931) STATIC void
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 932) xfs_buf_item_free(
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 933) struct xfs_buf_log_item *bip)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 934) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 935) xfs_buf_item_free_format(bip);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 936) kmem_free(bip->bli_item.li_lv_shadow);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 937) kmem_cache_free(xfs_buf_item_zone, bip);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 938) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 939)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 940) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 941) * xfs_buf_item_relse() is called when the buf log item is no longer needed.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 942) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 943) void
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 944) xfs_buf_item_relse(
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 945) struct xfs_buf *bp)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 946) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 947) struct xfs_buf_log_item *bip = bp->b_log_item;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 948)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 949) trace_xfs_buf_item_relse(bp, _RET_IP_);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 950) ASSERT(!test_bit(XFS_LI_IN_AIL, &bip->bli_item.li_flags));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 951)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 952) bp->b_log_item = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 953) xfs_buf_rele(bp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 954) xfs_buf_item_free(bip);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 955) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 956)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 957) void
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 958) xfs_buf_item_done(
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 959) struct xfs_buf *bp)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 960) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 961) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 962) * If we are forcibly shutting down, this may well be off the AIL
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 963) * already. That's because we simulate the log-committed callbacks to
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 964) * unpin these buffers. Or we may never have put this item on the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 965) * AIL because the transaction was forcibly aborted.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 966) * xfs_trans_ail_delete() takes care of both cases.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 967) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 968) * Either way, the AIL is useless if we're forcing a shutdown.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 969) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 970) * Note that log recovery writes might have buffer items that are not on
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 971) * the AIL even when the file system is not shut down.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 972) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 973) xfs_trans_ail_delete(&bp->b_log_item->bli_item,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 974) (bp->b_flags & _XBF_LOGRECOVERY) ? 0 :
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 975) SHUTDOWN_CORRUPT_INCORE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 976) xfs_buf_item_relse(bp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 977) }