Orange Pi5 kernel

Deprecated Linux kernel 5.10.110 for OrangePi 5/5B/5+ boards

3 Commits   0 Branches   0 Tags
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300    1) // SPDX-License-Identifier: GPL-2.0-only
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300    2) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300    3)  * Copyright (C) Sistina Software, Inc.  1997-2003 All rights reserved.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300    4)  * Copyright (C) 2004-2006 Red Hat, Inc.  All rights reserved.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300    5)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300    6) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300    7) #include <linux/sched.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300    8) #include <linux/slab.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300    9) #include <linux/spinlock.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   10) #include <linux/completion.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   11) #include <linux/buffer_head.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   12) #include <linux/mempool.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   13) #include <linux/gfs2_ondisk.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   14) #include <linux/bio.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   15) #include <linux/fs.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   16) #include <linux/list_sort.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   17) #include <linux/blkdev.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   18) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   19) #include "bmap.h"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   20) #include "dir.h"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   21) #include "gfs2.h"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   22) #include "incore.h"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   23) #include "inode.h"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   24) #include "glock.h"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   25) #include "glops.h"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   26) #include "log.h"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   27) #include "lops.h"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   28) #include "meta_io.h"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   29) #include "recovery.h"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   30) #include "rgrp.h"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   31) #include "trans.h"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   32) #include "util.h"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   33) #include "trace_gfs2.h"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   34) 
/**
 * gfs2_pin - Pin a buffer in memory
 * @sdp: The superblock
 * @bh: The buffer to be pinned
 *
 * The log lock must be held when calling this function
 */
void gfs2_pin(struct gfs2_sbd *sdp, struct buffer_head *bh)
{
	struct gfs2_bufdata *bd;

	/* Pinning is only legal from within a transaction. */
	BUG_ON(!current->journal_info);

	clear_buffer_dirty(bh);
	/* Pinning an already-pinned buffer indicates log state corruption;
	   assert and withdraw the filesystem. */
	if (test_set_buffer_pinned(bh))
		gfs2_assert_withdraw(sdp, 0);
	if (!buffer_uptodate(bh))
		gfs2_io_error_bh_wd(sdp, bh);
	bd = bh->b_private;
	/* If this buffer is in the AIL and it has already been written
	 * to in-place disk block, remove it from the AIL.
	 */
	spin_lock(&sdp->sd_ail_lock);
	if (bd->bd_tr)
		list_move(&bd->bd_ail_st_list, &bd->bd_tr->tr_ail2_list);
	spin_unlock(&sdp->sd_ail_lock);
	get_bh(bh);	/* extra reference held for the duration of the pin */
	atomic_inc(&sdp->sd_log_pinned);
	trace_gfs2_pin(bd, 1);
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   65) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   66) static bool buffer_is_rgrp(const struct gfs2_bufdata *bd)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   67) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   68) 	return bd->bd_gl->gl_name.ln_type == LM_TYPE_RGRP;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   69) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   70) 
/*
 * maybe_release_space - resync an rgrp's clone bitmap after unpinning
 * @bd: bufdata for a resource-group bitmap buffer
 *
 * If the rgrp keeps a clone of this bitmap block, issue any pending
 * discards (when mounted with the discard option) and copy the real
 * bitmap over the clone, then reset the cached free-block accounting.
 */
static void maybe_release_space(struct gfs2_bufdata *bd)
{
	struct gfs2_glock *gl = bd->bd_gl;
	struct gfs2_sbd *sdp = gl->gl_name.ln_sbd;
	struct gfs2_rgrpd *rgd = gfs2_glock2rgrp(gl);
	/* NOTE(review): assumes gl_name.ln_number is the rgrp's first block,
	   so the difference below indexes into rd_bits — confirm. */
	unsigned int index = bd->bd_bh->b_blocknr - gl->gl_name.ln_number;
	struct gfs2_bitmap *bi = rgd->rd_bits + index;

	if (bi->bi_clone == NULL)
		return;
	if (sdp->sd_args.ar_discard)
		gfs2_rgrp_send_discards(sdp, rgd->rd_data0, bd->bd_bh, bi, 1, NULL);
	/* Bring the clone back in sync with the real bitmap. */
	memcpy(bi->bi_clone + bi->bi_offset,
	       bd->bd_bh->b_data + bi->bi_offset, bi->bi_bytes);
	clear_bit(GBF_FULL, &bi->bi_flags);
	rgd->rd_free_clone = rgd->rd_free;
	rgd->rd_extfail_pt = rgd->rd_free;
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   89) 
/**
 * gfs2_unpin - Unpin a buffer
 * @sdp: the filesystem the buffer belongs to
 * @bh: The buffer to unpin
 * @tr: The current transaction; the buffer is moved to its AIL1 list
 *
 * Marks the buffer dirty again, drops the pin, and places the buffer
 * on @tr's AIL1 list so it can be written back to its in-place location.
 */

static void gfs2_unpin(struct gfs2_sbd *sdp, struct buffer_head *bh,
		       struct gfs2_trans *tr)
{
	struct gfs2_bufdata *bd = bh->b_private;

	BUG_ON(!buffer_uptodate(bh));
	BUG_ON(!buffer_pinned(bh));

	lock_buffer(bh);
	mark_buffer_dirty(bh);
	clear_buffer_pinned(bh);

	/* Rgrp bitmap buffers may have a clone that needs resyncing. */
	if (buffer_is_rgrp(bd))
		maybe_release_space(bd);

	spin_lock(&sdp->sd_ail_lock);
	if (bd->bd_tr) {
		/* Already on a transaction's AIL list: unlink the old entry
		   and drop a reference (presumably balancing the get_bh()
		   taken by gfs2_pin() — confirm). */
		list_del(&bd->bd_ail_st_list);
		brelse(bh);
	} else {
		/* First time on an AIL list: account it on the glock. */
		struct gfs2_glock *gl = bd->bd_gl;
		list_add(&bd->bd_ail_gl_list, &gl->gl_ail_list);
		atomic_inc(&gl->gl_ail_count);
	}
	bd->bd_tr = tr;
	list_add(&bd->bd_ail_st_list, &tr->tr_ail1_list);
	spin_unlock(&sdp->sd_ail_lock);

	clear_bit(GLF_LFLUSH, &bd->bd_gl->gl_flags);
	trace_gfs2_pin(bd, 0);
	unlock_buffer(bh);
	atomic_dec(&sdp->sd_log_pinned);
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  132) 
/*
 * gfs2_log_incr_head - advance the log flush head by one block
 * @sdp: The superblock
 *
 * The journal is a circular buffer of jd_blocks blocks, so the flush
 * head wraps back to zero at the end.
 */
void gfs2_log_incr_head(struct gfs2_sbd *sdp)
{
	/* NOTE(review): the flush head may only coincide with the tail
	   when no flush is in progress (flush head == head); hitting the
	   tail mid-flush would overwrite live journal data — confirm. */
	BUG_ON((sdp->sd_log_flush_head == sdp->sd_log_tail) &&
	       (sdp->sd_log_flush_head != sdp->sd_log_head));

	if (++sdp->sd_log_flush_head == sdp->sd_jdesc->jd_blocks)
		sdp->sd_log_flush_head = 0;
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  141) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  142) u64 gfs2_log_bmap(struct gfs2_jdesc *jd, unsigned int lblock)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  143) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  144) 	struct gfs2_journal_extent *je;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  145) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  146) 	list_for_each_entry(je, &jd->extent_list, list) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  147) 		if (lblock >= je->lblock && lblock < je->lblock + je->blocks)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  148) 			return je->dblock + lblock - je->lblock;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  149) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  150) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  151) 	return -1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  152) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  153) 
/**
 * gfs2_end_log_write_bh - end log write of pagecache data with buffers
 * @sdp: The superblock
 * @bvec: The bio_vec
 * @error: The i/o status
 *
 * This finds the relevant buffers and unlocks them and sets the
 * error flag according to the status of the i/o request. This is
 * used when the log is writing data which has an in-place version
 * that is pinned in the pagecache.
 */

static void gfs2_end_log_write_bh(struct gfs2_sbd *sdp,
				  struct bio_vec *bvec,
				  blk_status_t error)
{
	struct buffer_head *bh, *next;
	struct page *page = bvec->bv_page;
	unsigned size;

	bh = page_buffers(page);
	size = bvec->bv_len;
	/* Skip buffers that precede the start of this bio_vec segment. */
	while (bh_offset(bh) < bvec->bv_offset)
		bh = bh->b_this_page;
	do {
		if (error)
			mark_buffer_write_io_error(bh);
		unlock_buffer(bh);
		/* Grab the next buffer before brelse() may free this one. */
		next = bh->b_this_page;
		size -= bh->b_size;
		brelse(bh);
		bh = next;
	} while(bh && size);
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  188) 
/**
 * gfs2_end_log_write - end of i/o to the log
 * @bio: The bio
 *
 * Each bio_vec contains either data from the pagecache or data
 * relating to the log itself. Here we iterate over the bio_vec
 * array, processing both kinds of data.
 *
 */

static void gfs2_end_log_write(struct bio *bio)
{
	struct gfs2_sbd *sdp = bio->bi_private;
	struct bio_vec *bvec;
	struct page *page;
	struct bvec_iter_all iter_all;

	if (bio->bi_status) {
		/* Record only the first journal error (cmpxchg), then
		   schedule a withdraw of the filesystem. */
		if (!cmpxchg(&sdp->sd_log_error, 0, (int)bio->bi_status))
			fs_err(sdp, "Error %d writing to journal, jid=%u\n",
			       bio->bi_status, sdp->sd_jdesc->jd_jid);
		gfs2_withdraw_delayed(sdp);
		/* prevent more writes to the journal */
		clear_bit(SDF_JOURNAL_LIVE, &sdp->sd_flags);
		wake_up(&sdp->sd_logd_waitq);
	}

	bio_for_each_segment_all(bvec, bio, iter_all) {
		page = bvec->bv_page;
		/* Pages with buffers carry in-place (pagecache) data;
		   bare pages came from the gfs2_page_pool mempool. */
		if (page_has_buffers(page))
			gfs2_end_log_write_bh(sdp, bvec, bio->bi_status);
		else
			mempool_free(page, gfs2_page_pool);
	}

	bio_put(bio);
	/* Wake up any waiter once all in-flight log i/o has completed. */
	if (atomic_dec_and_test(&sdp->sd_log_in_flight))
		wake_up(&sdp->sd_log_flush_wait);
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  228) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  229) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  230)  * gfs2_log_submit_bio - Submit any pending log bio
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  231)  * @biop: Address of the bio pointer
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  232)  * @opf: REQ_OP | op_flags
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  233)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  234)  * Submit any pending part-built or full bio to the block device. If
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  235)  * there is no pending bio, then this is a no-op.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  236)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  237) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  238) void gfs2_log_submit_bio(struct bio **biop, int opf)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  239) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  240) 	struct bio *bio = *biop;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  241) 	if (bio) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  242) 		struct gfs2_sbd *sdp = bio->bi_private;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  243) 		atomic_inc(&sdp->sd_log_in_flight);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  244) 		bio->bi_opf = opf;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  245) 		submit_bio(bio);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  246) 		*biop = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  247) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  248) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  249) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  250) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  251)  * gfs2_log_alloc_bio - Allocate a bio
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  252)  * @sdp: The super block
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  253)  * @blkno: The device block number we want to write to
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  254)  * @end_io: The bi_end_io callback
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  255)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  256)  * Allocate a new bio, initialize it with the given parameters and return it.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  257)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  258)  * Returns: The newly allocated bio
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  259)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  260) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  261) static struct bio *gfs2_log_alloc_bio(struct gfs2_sbd *sdp, u64 blkno,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  262) 				      bio_end_io_t *end_io)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  263) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  264) 	struct super_block *sb = sdp->sd_vfs;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  265) 	struct bio *bio = bio_alloc(GFP_NOIO, BIO_MAX_PAGES);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  266) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  267) 	bio->bi_iter.bi_sector = blkno << sdp->sd_fsb2bb_shift;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  268) 	bio_set_dev(bio, sb->s_bdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  269) 	bio->bi_end_io = end_io;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  270) 	bio->bi_private = sdp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  271) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  272) 	return bio;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  273) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  274) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  275) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  276)  * gfs2_log_get_bio - Get cached log bio, or allocate a new one
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  277)  * @sdp: The super block
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  278)  * @blkno: The device block number we want to write to
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  279)  * @bio: The bio to get or allocate
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  280)  * @op: REQ_OP
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  281)  * @end_io: The bi_end_io callback
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  282)  * @flush: Always flush the current bio and allocate a new one?
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  283)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  284)  * If there is a cached bio, then if the next block number is sequential
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  285)  * with the previous one, return it, otherwise flush the bio to the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  286)  * device. If there is no cached bio, or we just flushed it, then
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  287)  * allocate a new one.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  288)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  289)  * Returns: The bio to use for log writes
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  290)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  291) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  292) static struct bio *gfs2_log_get_bio(struct gfs2_sbd *sdp, u64 blkno,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  293) 				    struct bio **biop, int op,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  294) 				    bio_end_io_t *end_io, bool flush)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  295) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  296) 	struct bio *bio = *biop;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  297) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  298) 	if (bio) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  299) 		u64 nblk;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  300) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  301) 		nblk = bio_end_sector(bio);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  302) 		nblk >>= sdp->sd_fsb2bb_shift;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  303) 		if (blkno == nblk && !flush)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  304) 			return bio;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  305) 		gfs2_log_submit_bio(biop, op);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  306) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  307) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  308) 	*biop = gfs2_log_alloc_bio(sdp, blkno, end_io);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  309) 	return *biop;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  310) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  311) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  312) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  313)  * gfs2_log_write - write to log
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  314)  * @sdp: the filesystem
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  315)  * @page: the page to write
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  316)  * @size: the size of the data to write
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  317)  * @offset: the offset within the page 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  318)  * @blkno: block number of the log entry
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  319)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  320)  * Try and add the page segment to the current bio. If that fails,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  321)  * submit the current bio to the device and create a new one, and
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  322)  * then add the page segment to that.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  323)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  324) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  325) void gfs2_log_write(struct gfs2_sbd *sdp, struct page *page,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  326) 		    unsigned size, unsigned offset, u64 blkno)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  327) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  328) 	struct bio *bio;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  329) 	int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  330) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  331) 	bio = gfs2_log_get_bio(sdp, blkno, &sdp->sd_log_bio, REQ_OP_WRITE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  332) 			       gfs2_end_log_write, false);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  333) 	ret = bio_add_page(bio, page, size, offset);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  334) 	if (ret == 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  335) 		bio = gfs2_log_get_bio(sdp, blkno, &sdp->sd_log_bio,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  336) 				       REQ_OP_WRITE, gfs2_end_log_write, true);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  337) 		ret = bio_add_page(bio, page, size, offset);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  338) 		WARN_ON(ret == 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  339) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  340) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  341) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  342) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  343)  * gfs2_log_write_bh - write a buffer's content to the log
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  344)  * @sdp: The super block
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  345)  * @bh: The buffer pointing to the in-place location
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  346)  * 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  347)  * This writes the content of the buffer to the next available location
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  348)  * in the log. The buffer will be unlocked once the i/o to the log has
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  349)  * completed.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  350)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  351) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  352) static void gfs2_log_write_bh(struct gfs2_sbd *sdp, struct buffer_head *bh)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  353) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  354) 	u64 dblock;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  355) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  356) 	dblock = gfs2_log_bmap(sdp->sd_jdesc, sdp->sd_log_flush_head);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  357) 	gfs2_log_incr_head(sdp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  358) 	gfs2_log_write(sdp, bh->b_page, bh->b_size, bh_offset(bh), dblock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  359) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  360) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  361) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  362)  * gfs2_log_write_page - write one block stored in a page, into the log
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  363)  * @sdp: The superblock
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  364)  * @page: The struct page
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  365)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  366)  * This writes the first block-sized part of the page into the log. Note
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  367)  * that the page must have been allocated from the gfs2_page_pool mempool
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  368)  * and that after this has been called, ownership has been transferred and
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  369)  * the page may be freed at any time.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  370)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  371) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  372) void gfs2_log_write_page(struct gfs2_sbd *sdp, struct page *page)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  373) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  374) 	struct super_block *sb = sdp->sd_vfs;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  375) 	u64 dblock;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  376) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  377) 	dblock = gfs2_log_bmap(sdp->sd_jdesc, sdp->sd_log_flush_head);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  378) 	gfs2_log_incr_head(sdp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  379) 	gfs2_log_write(sdp, page, sb->s_blocksize, 0, dblock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  380) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  381) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  382) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  383)  * gfs2_end_log_read - end I/O callback for reads from the log
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  384)  * @bio: The bio
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  385)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  386)  * Simply unlock the pages in the bio. The main thread will wait on them and
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  387)  * process them in order as necessary.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  388)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  389) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  390) static void gfs2_end_log_read(struct bio *bio)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  391) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  392) 	struct page *page;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  393) 	struct bio_vec *bvec;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  394) 	struct bvec_iter_all iter_all;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  395) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  396) 	bio_for_each_segment_all(bvec, bio, iter_all) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  397) 		page = bvec->bv_page;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  398) 		if (bio->bi_status) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  399) 			int err = blk_status_to_errno(bio->bi_status);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  400) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  401) 			SetPageError(page);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  402) 			mapping_set_error(page->mapping, err);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  403) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  404) 		unlock_page(page);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  405) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  406) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  407) 	bio_put(bio);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  408) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  409) 
/**
 * gfs2_jhead_pg_srch - Look for the journal head in a given page.
 * @jd: The journal descriptor
 * @head: The best log header found so far; updated in place
 * @page: The page to look in
 *
 * Scans each filesystem block within the page for a valid log header.
 * While sequence numbers keep increasing, @head is advanced; the first
 * header with a lower sequence number than @head means @head already
 * holds the most recent header, i.e. the journal head.
 *
 * Returns: true if the head was found, false otherwise.
 */

static bool gfs2_jhead_pg_srch(struct gfs2_jdesc *jd,
			      struct gfs2_log_header_host *head,
			      struct page *page)
{
	struct gfs2_sbd *sdp = GFS2_SB(jd->jd_inode);
	struct gfs2_log_header_host lh;
	void *kaddr = kmap_atomic(page);	/* no sleeping until kunmap */
	unsigned int offset;
	bool ret = false;

	for (offset = 0; offset < PAGE_SIZE; offset += sdp->sd_sb.sb_bsize) {
		/* __get_log_header() returns 0 for a valid log header. */
		if (!__get_log_header(sdp, kaddr + offset, 0, &lh)) {
			if (lh.lh_sequence >= head->lh_sequence)
				*head = lh;
			else {
				ret = true;
				break;
			}
		}
	}
	kunmap_atomic(kaddr);
	return ret;
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  441) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  442) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  443)  * gfs2_jhead_process_page - Search/cleanup a page
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  444)  * @jd: The journal descriptor
 * @index: Index of the page to look into
 * @head: The current best journal head; updated if a newer header is found
 * @done: If set, perform only cleanup, else search and set if found.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  447)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  448)  * Find the page with 'index' in the journal's mapping. Search the page for
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  449)  * the journal head if requested (cleanup == false). Release refs on the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  450)  * page so the page cache can reclaim it (put_page() twice). We grabbed a
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  451)  * reference on this page two times, first when we did a find_or_create_page()
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  452)  * to obtain the page to add it to the bio and second when we do a
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  453)  * find_get_page() here to get the page to wait on while I/O on it is being
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  454)  * completed.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  455)  * This function is also used to free up a page we might've grabbed but not
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  456)  * used. Maybe we added it to a bio, but not submitted it for I/O. Or we
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  457)  * submitted the I/O, but we already found the jhead so we only need to drop
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  458)  * our references to the page.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  459)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  460) 
static void gfs2_jhead_process_page(struct gfs2_jdesc *jd, unsigned long index,
				    struct gfs2_log_header_host *head,
				    bool *done)
{
	struct page *page;

	page = find_get_page(jd->jd_inode->i_mapping, index);
	/* The page is locked while read I/O on it is in flight */
	wait_on_page_locked(page);

	/* On a read error we cannot trust anything beyond this page */
	if (PageError(page))
		*done = true;

	if (!*done)
		*done = gfs2_jhead_pg_srch(jd, head, page);

	/*
	 * Drop both references (see the comment above) so the page cache
	 * can reclaim the page.
	 */
	put_page(page); /* Once for find_get_page */
	put_page(page); /* Once more for find_or_create_page */
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  479) 
/**
 * gfs2_chain_bio - Chain a new bio to a full one and submit the full one
 * @prev: The full bio, which is submitted here
 * @nr_iovecs: Number of page vectors for the new bio
 *
 * The new bio starts at the sector where @prev ends and inherits its block
 * device, operation flags and write hint.  Because of bio_chain(), @prev's
 * completion is deferred until the new bio also completes.
 *
 * Returns: the new, chained bio.
 */
static struct bio *gfs2_chain_bio(struct bio *prev, unsigned int nr_iovecs)
{
	struct bio *new;

	new = bio_alloc(GFP_NOIO, nr_iovecs);
	bio_copy_dev(new, prev);
	/* Continue exactly where the previous bio left off */
	new->bi_iter.bi_sector = bio_end_sector(prev);
	new->bi_opf = prev->bi_opf;
	new->bi_write_hint = prev->bi_write_hint;
	bio_chain(new, prev);
	submit_bio(prev);
	return new;
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  493) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  494) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  495)  * gfs2_find_jhead - find the head of a log
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  496)  * @jd: The journal descriptor
 * @head: The log descriptor for the head of the log is returned here
 * @keep_cache: If false, truncate the journal's page cache before returning
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  498)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  499)  * Do a search of a journal by reading it in large chunks using bios and find
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  500)  * the valid log entry with the highest sequence number.  (i.e. the log head)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  501)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  502)  * Returns: 0 on success, errno otherwise
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  503)  */
int gfs2_find_jhead(struct gfs2_jdesc *jd, struct gfs2_log_header_host *head,
		    bool keep_cache)
{
	struct gfs2_sbd *sdp = GFS2_SB(jd->jd_inode);
	struct address_space *mapping = jd->jd_inode->i_mapping;
	unsigned int block = 0, blocks_submitted = 0, blocks_read = 0;
	unsigned int bsize = sdp->sd_sb.sb_bsize, off;
	unsigned int bsize_shift = sdp->sd_sb.sb_bsize_shift;
	unsigned int shift = PAGE_SHIFT - bsize_shift;
	/* Cap read I/O kept in flight at 2MB worth of filesystem blocks */
	unsigned int max_blocks = 2 * 1024 * 1024 >> bsize_shift;
	struct gfs2_journal_extent *je;
	int sz, ret = 0;
	struct bio *bio = NULL;
	struct page *page = NULL;
	bool done = false;
	errseq_t since;

	memset(head, 0, sizeof(*head));
	if (list_empty(&jd->extent_list))
		gfs2_map_journal_extents(sdp, jd);

	/* Sample the writeback error state so we can report new errors below */
	since = filemap_sample_wb_err(mapping);
	list_for_each_entry(je, &jd->extent_list, list) {
		u64 dblock = je->dblock;

		for (; block < je->lblock + je->blocks; block++, dblock++) {
			if (!page) {
				page = find_or_create_page(mapping,
						block >> shift, GFP_NOFS);
				if (!page) {
					ret = -ENOMEM;
					done = true;
					goto out;
				}
				off = 0;
			}

			if (bio && (off || block < blocks_submitted + max_blocks)) {
				sector_t sector = dblock << sdp->sd_fsb2bb_shift;

				/* Fast path: block is contiguous with the bio */
				if (bio_end_sector(bio) == sector) {
					sz = bio_add_page(bio, page, bsize, off);
					if (sz == bsize)
						goto block_added;
				}
				/*
				 * Partially filled page: chain a new bio so
				 * the rest of the page still gets read.
				 */
				if (off) {
					unsigned int blocks =
						(PAGE_SIZE - off) >> bsize_shift;

					bio = gfs2_chain_bio(bio, blocks);
					goto add_block_to_new_bio;
				}
			}

			if (bio) {
				blocks_submitted = block;
				submit_bio(bio);
			}

			bio = gfs2_log_alloc_bio(sdp, dblock, gfs2_end_log_read);
			bio->bi_opf = REQ_OP_READ;
add_block_to_new_bio:
			sz = bio_add_page(bio, page, bsize, off);
			BUG_ON(sz != bsize);
block_added:
			off += bsize;
			if (off == PAGE_SIZE)
				page = NULL;
			if (blocks_submitted <= blocks_read + max_blocks) {
				/* Keep at least one bio in flight */
				continue;
			}

			/* Consume completed pages while further I/O is queued */
			gfs2_jhead_process_page(jd, blocks_read >> shift, head, &done);
			blocks_read += PAGE_SIZE >> bsize_shift;
			if (done)
				goto out;  /* found */
		}
	}

out:
	if (bio)
		submit_bio(bio);
	/* Wait for and process every page we submitted I/O for */
	while (blocks_read < block) {
		gfs2_jhead_process_page(jd, blocks_read >> shift, head, &done);
		blocks_read += PAGE_SIZE >> bsize_shift;
	}

	if (!ret)
		ret = filemap_check_wb_err(mapping, since);

	if (!keep_cache)
		truncate_inode_pages(mapping, 0);

	return ret;
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  600) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  601) static struct page *gfs2_get_log_desc(struct gfs2_sbd *sdp, u32 ld_type,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  602) 				      u32 ld_length, u32 ld_data1)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  603) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  604) 	struct page *page = mempool_alloc(gfs2_page_pool, GFP_NOIO);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  605) 	struct gfs2_log_descriptor *ld = page_address(page);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  606) 	clear_page(ld);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  607) 	ld->ld_header.mh_magic = cpu_to_be32(GFS2_MAGIC);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  608) 	ld->ld_header.mh_type = cpu_to_be32(GFS2_METATYPE_LD);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  609) 	ld->ld_header.mh_format = cpu_to_be32(GFS2_FORMAT_LD);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  610) 	ld->ld_type = cpu_to_be32(ld_type);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  611) 	ld->ld_length = cpu_to_be32(ld_length);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  612) 	ld->ld_data1 = cpu_to_be32(ld_data1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  613) 	ld->ld_data2 = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  614) 	return page;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  615) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  616) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  617) static void gfs2_check_magic(struct buffer_head *bh)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  618) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  619) 	void *kaddr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  620) 	__be32 *ptr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  621) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  622) 	clear_buffer_escaped(bh);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  623) 	kaddr = kmap_atomic(bh->b_page);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  624) 	ptr = kaddr + bh_offset(bh);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  625) 	if (*ptr == cpu_to_be32(GFS2_MAGIC))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  626) 		set_buffer_escaped(bh);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  627) 	kunmap_atomic(kaddr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  628) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  629) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  630) static int blocknr_cmp(void *priv, struct list_head *a, struct list_head *b)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  631) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  632) 	struct gfs2_bufdata *bda, *bdb;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  633) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  634) 	bda = list_entry(a, struct gfs2_bufdata, bd_list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  635) 	bdb = list_entry(b, struct gfs2_bufdata, bd_list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  636) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  637) 	if (bda->bd_bh->b_blocknr < bdb->bd_bh->b_blocknr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  638) 		return -1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  639) 	if (bda->bd_bh->b_blocknr > bdb->bd_bh->b_blocknr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  640) 		return 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  641) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  642) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  643) 
/**
 * gfs2_before_commit - Write out log descriptors and their blocks
 * @sdp: The superblock
 * @limit: Maximum number of blocks described by one log descriptor
 * @total: Total number of blocks to write
 * @blist: The list of gfs2_bufdata entries to write
 * @is_databuf: True for journaled data, which may need to be escaped
 *
 * For each chunk of up to @limit blocks, writes a descriptor page listing
 * the real block numbers, then writes the blocks themselves.  The log lock
 * is dropped around allocation and I/O and re-taken to walk the list.
 */
static void gfs2_before_commit(struct gfs2_sbd *sdp, unsigned int limit,
				unsigned int total, struct list_head *blist,
				bool is_databuf)
{
	struct gfs2_log_descriptor *ld;
	struct gfs2_bufdata *bd1 = NULL, *bd2;
	struct page *page;
	unsigned int num;
	unsigned n;
	__be64 *ptr;

	gfs2_log_lock(sdp);
	/* Sort by block number so writes are as contiguous as possible */
	list_sort(NULL, blist, blocknr_cmp);
	bd1 = bd2 = list_prepare_entry(bd1, blist, bd_list);
	while(total) {
		num = total;
		if (total > limit)
			num = limit;
		gfs2_log_unlock(sdp);
		page = gfs2_get_log_desc(sdp,
					 is_databuf ? GFS2_LOG_DESC_JDATA :
					 GFS2_LOG_DESC_METADATA, num + 1, num);
		ld = page_address(page);
		gfs2_log_lock(sdp);
		/* Block number list begins right after the descriptor */
		ptr = (__be64 *)(ld + 1);

		/* Pass 1: fill the descriptor with the real block numbers */
		n = 0;
		list_for_each_entry_continue(bd1, blist, bd_list) {
			*ptr++ = cpu_to_be64(bd1->bd_bh->b_blocknr);
			if (is_databuf) {
				/*
				 * Data that starts with the log magic must be
				 * escaped; note that next to its block number.
				 */
				gfs2_check_magic(bd1->bd_bh);
				*ptr++ = cpu_to_be64(buffer_escaped(bd1->bd_bh) ? 1 : 0);
			}
			if (++n >= num)
				break;
		}

		gfs2_log_unlock(sdp);
		gfs2_log_write_page(sdp, page);
		gfs2_log_lock(sdp);

		/* Pass 2: write out the blocks themselves */
		n = 0;
		list_for_each_entry_continue(bd2, blist, bd_list) {
			get_bh(bd2->bd_bh);
			gfs2_log_unlock(sdp);
			lock_buffer(bd2->bd_bh);

			if (buffer_escaped(bd2->bd_bh)) {
				void *kaddr;
				/*
				 * Write an escaped copy with the leading magic
				 * zeroed, so replay won't mistake the data for
				 * log metadata.
				 */
				page = mempool_alloc(gfs2_page_pool, GFP_NOIO);
				ptr = page_address(page);
				kaddr = kmap_atomic(bd2->bd_bh->b_page);
				memcpy(ptr, kaddr + bh_offset(bd2->bd_bh),
				       bd2->bd_bh->b_size);
				kunmap_atomic(kaddr);
				*(__be32 *)ptr = 0;
				clear_buffer_escaped(bd2->bd_bh);
				unlock_buffer(bd2->bd_bh);
				brelse(bd2->bd_bh);
				gfs2_log_write_page(sdp, page);
			} else {
				gfs2_log_write_bh(sdp, bd2->bd_bh);
			}
			gfs2_log_lock(sdp);
			if (++n >= num)
				break;
		}

		BUG_ON(total < num);
		total -= num;
	}
	gfs2_log_unlock(sdp);
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  717) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  718) static void buf_lo_before_commit(struct gfs2_sbd *sdp, struct gfs2_trans *tr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  719) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  720) 	unsigned int limit = buf_limit(sdp); /* 503 for 4k blocks */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  721) 	unsigned int nbuf;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  722) 	if (tr == NULL)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  723) 		return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  724) 	nbuf = tr->tr_num_buf_new - tr->tr_num_buf_rm;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  725) 	gfs2_before_commit(sdp, limit, nbuf, &tr->tr_buf, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  726) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  727) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  728) static void buf_lo_after_commit(struct gfs2_sbd *sdp, struct gfs2_trans *tr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  729) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  730) 	struct list_head *head;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  731) 	struct gfs2_bufdata *bd;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  732) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  733) 	if (tr == NULL)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  734) 		return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  735) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  736) 	head = &tr->tr_buf;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  737) 	while (!list_empty(head)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  738) 		bd = list_first_entry(head, struct gfs2_bufdata, bd_list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  739) 		list_del_init(&bd->bd_list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  740) 		gfs2_unpin(sdp, bd->bd_bh, tr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  741) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  742) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  743) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  744) static void buf_lo_before_scan(struct gfs2_jdesc *jd,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  745) 			       struct gfs2_log_header_host *head, int pass)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  746) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  747) 	if (pass != 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  748) 		return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  749) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  750) 	jd->jd_found_blocks = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  751) 	jd->jd_replayed_blocks = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  752) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  753) 
/**
 * buf_lo_scan_elements - Replay metadata blocks from a log descriptor
 * @jd: The journal descriptor
 * @start: Log block of the descriptor itself
 * @ld: The log descriptor
 * @ptr: The list of block numbers following the descriptor
 * @pass: Only acts on pass 1 (metadata replay)
 *
 * Copies each non-revoked journal block into a new buffer at its real
 * location and marks it dirty.
 *
 * Returns: 0 on success, -EIO on a metadata check failure, or an error
 * from reading the journal.
 */
static int buf_lo_scan_elements(struct gfs2_jdesc *jd, u32 start,
				struct gfs2_log_descriptor *ld, __be64 *ptr,
				int pass)
{
	struct gfs2_inode *ip = GFS2_I(jd->jd_inode);
	struct gfs2_sbd *sdp = GFS2_SB(jd->jd_inode);
	struct gfs2_glock *gl = ip->i_gl;
	unsigned int blks = be32_to_cpu(ld->ld_data1);
	struct buffer_head *bh_log, *bh_ip;
	u64 blkno;
	int error = 0;

	if (pass != 1 || be32_to_cpu(ld->ld_type) != GFS2_LOG_DESC_METADATA)
		return 0;

	/* Skip past the descriptor block to the first data block */
	gfs2_replay_incr_blk(jd, &start);

	for (; blks; gfs2_replay_incr_blk(jd, &start), blks--) {
		blkno = be64_to_cpu(*ptr++);

		jd->jd_found_blocks++;

		/* Revoked blocks must not be replayed */
		if (gfs2_revoke_check(jd, blkno, start))
			continue;

		error = gfs2_replay_read_block(jd, start, &bh_log);
		if (error)
			return error;

		/* Copy the journal copy of the block to its real location */
		bh_ip = gfs2_meta_new(gl, blkno);
		memcpy(bh_ip->b_data, bh_log->b_data, bh_log->b_size);

		if (gfs2_meta_check(sdp, bh_ip))
			error = -EIO;
		else {
			struct gfs2_meta_header *mh =
				(struct gfs2_meta_header *)bh_ip->b_data;

			if (mh->mh_type == cpu_to_be32(GFS2_METATYPE_RG)) {
				struct gfs2_rgrpd *rgd;

				/*
				 * Diagnostic only: an rgrp bitmap buffer that
				 * is already cached during replay is
				 * unexpected, so dump state for debugging.
				 */
				rgd = gfs2_blk2rgrpd(sdp, blkno, false);
				if (rgd && rgd->rd_addr == blkno &&
				    rgd->rd_bits && rgd->rd_bits->bi_bh) {
					fs_info(sdp, "Replaying 0x%llx but we "
						"already have a bh!\n",
						(unsigned long long)blkno);
					fs_info(sdp, "busy:%d, pinned:%d\n",
						buffer_busy(rgd->rd_bits->bi_bh) ? 1 : 0,
						buffer_pinned(rgd->rd_bits->bi_bh));
					gfs2_dump_glock(NULL, rgd->rd_gl, true);
				}
			}
			mark_buffer_dirty(bh_ip);
		}
		brelse(bh_log);
		brelse(bh_ip);

		if (error)
			break;

		jd->jd_replayed_blocks++;
	}

	return error;
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  820) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  821) static void buf_lo_after_scan(struct gfs2_jdesc *jd, int error, int pass)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  822) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  823) 	struct gfs2_inode *ip = GFS2_I(jd->jd_inode);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  824) 	struct gfs2_sbd *sdp = GFS2_SB(jd->jd_inode);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  825) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  826) 	if (error) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  827) 		gfs2_inode_metasync(ip->i_gl);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  828) 		return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  829) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  830) 	if (pass != 1)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  831) 		return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  832) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  833) 	gfs2_inode_metasync(ip->i_gl);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  834) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  835) 	fs_info(sdp, "jid=%u: Replayed %u of %u blocks\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  836) 	        jd->jd_jid, jd->jd_replayed_blocks, jd->jd_found_blocks);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  837) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  838) 
/**
 * revoke_lo_before_commit - Write the pending revokes to the log
 * @sdp: The superblock
 * @tr: The transaction (unused here)
 *
 * Packs all queued revoke block numbers into a revoke log descriptor,
 * continuing into additional GFS2_METATYPE_LB blocks when one fills up.
 */
static void revoke_lo_before_commit(struct gfs2_sbd *sdp, struct gfs2_trans *tr)
{
	struct gfs2_meta_header *mh;
	unsigned int offset;
	struct list_head *head = &sdp->sd_log_revokes;
	struct gfs2_bufdata *bd;
	struct page *page;
	unsigned int length;

	gfs2_write_revokes(sdp);
	if (!sdp->sd_log_num_revoke)
		return;

	/* Number of log blocks needed to hold all revoke records */
	length = gfs2_struct2blk(sdp, sdp->sd_log_num_revoke);
	page = gfs2_get_log_desc(sdp, GFS2_LOG_DESC_REVOKE, length, sdp->sd_log_num_revoke);
	offset = sizeof(struct gfs2_log_descriptor);

	list_for_each_entry(bd, head, bd_list) {
		sdp->sd_log_num_revoke--;

		/* Current block full: write it, start a continuation block */
		if (offset + sizeof(u64) > sdp->sd_sb.sb_bsize) {

			gfs2_log_write_page(sdp, page);
			page = mempool_alloc(gfs2_page_pool, GFP_NOIO);
			mh = page_address(page);
			clear_page(mh);
			mh->mh_magic = cpu_to_be32(GFS2_MAGIC);
			mh->mh_type = cpu_to_be32(GFS2_METATYPE_LB);
			mh->mh_format = cpu_to_be32(GFS2_FORMAT_LB);
			offset = sizeof(struct gfs2_meta_header);
		}

		*(__be64 *)(page_address(page) + offset) = cpu_to_be64(bd->bd_blkno);
		offset += sizeof(u64);
	}
	/* Every queued revoke must have been consumed above */
	gfs2_assert_withdraw(sdp, !sdp->sd_log_num_revoke);

	gfs2_log_write_page(sdp, page);
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  878) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  879) static void revoke_lo_after_commit(struct gfs2_sbd *sdp, struct gfs2_trans *tr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  880) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  881) 	struct list_head *head = &sdp->sd_log_revokes;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  882) 	struct gfs2_bufdata *bd;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  883) 	struct gfs2_glock *gl;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  884) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  885) 	while (!list_empty(head)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  886) 		bd = list_first_entry(head, struct gfs2_bufdata, bd_list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  887) 		list_del_init(&bd->bd_list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  888) 		gl = bd->bd_gl;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  889) 		gfs2_glock_remove_revoke(gl);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  890) 		kmem_cache_free(gfs2_bufdata_cachep, bd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  891) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  892) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  893) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  894) static void revoke_lo_before_scan(struct gfs2_jdesc *jd,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  895) 				  struct gfs2_log_header_host *head, int pass)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  896) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  897) 	if (pass != 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  898) 		return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  899) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  900) 	jd->jd_found_revokes = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  901) 	jd->jd_replay_tail = head->lh_tail;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  902) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  903) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  904) static int revoke_lo_scan_elements(struct gfs2_jdesc *jd, u32 start,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  905) 				   struct gfs2_log_descriptor *ld, __be64 *ptr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  906) 				   int pass)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  907) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  908) 	struct gfs2_sbd *sdp = GFS2_SB(jd->jd_inode);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  909) 	unsigned int blks = be32_to_cpu(ld->ld_length);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  910) 	unsigned int revokes = be32_to_cpu(ld->ld_data1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  911) 	struct buffer_head *bh;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  912) 	unsigned int offset;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  913) 	u64 blkno;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  914) 	int first = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  915) 	int error;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  916) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  917) 	if (pass != 0 || be32_to_cpu(ld->ld_type) != GFS2_LOG_DESC_REVOKE)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  918) 		return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  919) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  920) 	offset = sizeof(struct gfs2_log_descriptor);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  921) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  922) 	for (; blks; gfs2_replay_incr_blk(jd, &start), blks--) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  923) 		error = gfs2_replay_read_block(jd, start, &bh);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  924) 		if (error)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  925) 			return error;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  926) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  927) 		if (!first)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  928) 			gfs2_metatype_check(sdp, bh, GFS2_METATYPE_LB);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  929) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  930) 		while (offset + sizeof(u64) <= sdp->sd_sb.sb_bsize) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  931) 			blkno = be64_to_cpu(*(__be64 *)(bh->b_data + offset));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  932) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  933) 			error = gfs2_revoke_add(jd, blkno, start);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  934) 			if (error < 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  935) 				brelse(bh);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  936) 				return error;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  937) 			}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  938) 			else if (error)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  939) 				jd->jd_found_revokes++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  940) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  941) 			if (!--revokes)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  942) 				break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  943) 			offset += sizeof(u64);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  944) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  945) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  946) 		brelse(bh);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  947) 		offset = sizeof(struct gfs2_meta_header);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  948) 		first = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  949) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  950) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  951) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  952) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  953) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  954) static void revoke_lo_after_scan(struct gfs2_jdesc *jd, int error, int pass)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  955) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  956) 	struct gfs2_sbd *sdp = GFS2_SB(jd->jd_inode);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  957) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  958) 	if (error) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  959) 		gfs2_revoke_clean(jd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  960) 		return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  961) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  962) 	if (pass != 1)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  963) 		return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  964) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  965) 	fs_info(sdp, "jid=%u: Found %u revoke tags\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  966) 	        jd->jd_jid, jd->jd_found_revokes);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  967) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  968) 	gfs2_revoke_clean(jd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  969) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  970) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  971) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  972)  * databuf_lo_before_commit - Scan the data buffers, writing as we go
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  973)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  974)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  975) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  976) static void databuf_lo_before_commit(struct gfs2_sbd *sdp, struct gfs2_trans *tr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  977) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  978) 	unsigned int limit = databuf_limit(sdp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  979) 	unsigned int nbuf;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  980) 	if (tr == NULL)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  981) 		return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  982) 	nbuf = tr->tr_num_databuf_new - tr->tr_num_databuf_rm;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  983) 	gfs2_before_commit(sdp, limit, nbuf, &tr->tr_databuf, 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  984) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  985) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  986) static int databuf_lo_scan_elements(struct gfs2_jdesc *jd, u32 start,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  987) 				    struct gfs2_log_descriptor *ld,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  988) 				    __be64 *ptr, int pass)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  989) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  990) 	struct gfs2_inode *ip = GFS2_I(jd->jd_inode);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  991) 	struct gfs2_glock *gl = ip->i_gl;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  992) 	unsigned int blks = be32_to_cpu(ld->ld_data1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  993) 	struct buffer_head *bh_log, *bh_ip;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  994) 	u64 blkno;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  995) 	u64 esc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  996) 	int error = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  997) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  998) 	if (pass != 1 || be32_to_cpu(ld->ld_type) != GFS2_LOG_DESC_JDATA)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  999) 		return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1000) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1001) 	gfs2_replay_incr_blk(jd, &start);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1002) 	for (; blks; gfs2_replay_incr_blk(jd, &start), blks--) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1003) 		blkno = be64_to_cpu(*ptr++);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1004) 		esc = be64_to_cpu(*ptr++);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1005) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1006) 		jd->jd_found_blocks++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1007) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1008) 		if (gfs2_revoke_check(jd, blkno, start))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1009) 			continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1010) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1011) 		error = gfs2_replay_read_block(jd, start, &bh_log);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1012) 		if (error)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1013) 			return error;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1014) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1015) 		bh_ip = gfs2_meta_new(gl, blkno);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1016) 		memcpy(bh_ip->b_data, bh_log->b_data, bh_log->b_size);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1017) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1018) 		/* Unescape */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1019) 		if (esc) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1020) 			__be32 *eptr = (__be32 *)bh_ip->b_data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1021) 			*eptr = cpu_to_be32(GFS2_MAGIC);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1022) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1023) 		mark_buffer_dirty(bh_ip);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1024) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1025) 		brelse(bh_log);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1026) 		brelse(bh_ip);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1027) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1028) 		jd->jd_replayed_blocks++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1029) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1030) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1031) 	return error;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1032) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1033) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1034) /* FIXME: sort out accounting for log blocks etc. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1035) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1036) static void databuf_lo_after_scan(struct gfs2_jdesc *jd, int error, int pass)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1037) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1038) 	struct gfs2_inode *ip = GFS2_I(jd->jd_inode);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1039) 	struct gfs2_sbd *sdp = GFS2_SB(jd->jd_inode);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1040) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1041) 	if (error) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1042) 		gfs2_inode_metasync(ip->i_gl);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1043) 		return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1044) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1045) 	if (pass != 1)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1046) 		return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1047) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1048) 	/* data sync? */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1049) 	gfs2_inode_metasync(ip->i_gl);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1050) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1051) 	fs_info(sdp, "jid=%u: Replayed %u of %u data blocks\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1052) 		jd->jd_jid, jd->jd_replayed_blocks, jd->jd_found_blocks);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1053) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1054) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1055) static void databuf_lo_after_commit(struct gfs2_sbd *sdp, struct gfs2_trans *tr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1056) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1057) 	struct list_head *head;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1058) 	struct gfs2_bufdata *bd;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1059) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1060) 	if (tr == NULL)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1061) 		return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1062) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1063) 	head = &tr->tr_databuf;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1064) 	while (!list_empty(head)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1065) 		bd = list_first_entry(head, struct gfs2_bufdata, bd_list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1066) 		list_del_init(&bd->bd_list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1067) 		gfs2_unpin(sdp, bd->bd_bh, tr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1068) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1069) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1070) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1071) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1072) static const struct gfs2_log_operations gfs2_buf_lops = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1073) 	.lo_before_commit = buf_lo_before_commit,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1074) 	.lo_after_commit = buf_lo_after_commit,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1075) 	.lo_before_scan = buf_lo_before_scan,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1076) 	.lo_scan_elements = buf_lo_scan_elements,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1077) 	.lo_after_scan = buf_lo_after_scan,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1078) 	.lo_name = "buf",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1079) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1080) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1081) static const struct gfs2_log_operations gfs2_revoke_lops = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1082) 	.lo_before_commit = revoke_lo_before_commit,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1083) 	.lo_after_commit = revoke_lo_after_commit,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1084) 	.lo_before_scan = revoke_lo_before_scan,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1085) 	.lo_scan_elements = revoke_lo_scan_elements,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1086) 	.lo_after_scan = revoke_lo_after_scan,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1087) 	.lo_name = "revoke",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1088) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1089) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1090) static const struct gfs2_log_operations gfs2_databuf_lops = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1091) 	.lo_before_commit = databuf_lo_before_commit,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1092) 	.lo_after_commit = databuf_lo_after_commit,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1093) 	.lo_scan_elements = databuf_lo_scan_elements,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1094) 	.lo_after_scan = databuf_lo_after_scan,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1095) 	.lo_name = "databuf",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1096) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1097) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1098) const struct gfs2_log_operations *gfs2_log_ops[] = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1099) 	&gfs2_databuf_lops,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1100) 	&gfs2_buf_lops,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1101) 	&gfs2_revoke_lops,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1102) 	NULL,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1103) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1104)