Orange Pi5 kernel

Deprecated Linux kernel 5.10.110 for OrangePi 5/5B/5+ boards. The file below is fs/gfs2/recovery.c (GFS2 journal recovery), from commit 8f3ce5b39 (kx, 2023-10-28).

// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) Sistina Software, Inc.  1997-2003 All rights reserved.
 * Copyright (C) 2004-2006 Red Hat, Inc.  All rights reserved.
 */

#include <linux/module.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/completion.h>
#include <linux/buffer_head.h>
#include <linux/gfs2_ondisk.h>
#include <linux/crc32.h>
#include <linux/crc32c.h>
#include <linux/ktime.h>

#include "gfs2.h"
#include "incore.h"
#include "bmap.h"
#include "glock.h"
#include "glops.h"
#include "log.h"
#include "lops.h"
#include "meta_io.h"
#include "recovery.h"
#include "super.h"
#include "util.h"
#include "dir.h"

struct workqueue_struct *gfs_recovery_wq;

int gfs2_replay_read_block(struct gfs2_jdesc *jd, unsigned int blk,
			   struct buffer_head **bh)
{
	struct gfs2_inode *ip = GFS2_I(jd->jd_inode);
	struct gfs2_glock *gl = ip->i_gl;
	int new = 0;
	u64 dblock;
	u32 extlen;
	int error;

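	/* Map the journal-relative block to its on-disk location; a journal
	 * block with no backing disk block indicates an inconsistent
	 * journal inode, so treat it as an I/O error below. */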
	error = gfs2_extent_map(&ip->i_inode, blk, &new, &dblock, &extlen);
	if (error)
		return error;
	if (!dblock) {
		gfs2_consist_inode(ip);
		return -EIO;
	}

	*bh = gfs2_meta_ra(gl, dblock, extlen);

	return error;
}

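/*
 * Remember that block @blkno was revoked at journal position @where.
 * Returns 1 if a new entry was added, 0 if an existing entry for @blkno
 * was updated, or -ENOMEM on allocation failure.
 */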
int gfs2_revoke_add(struct gfs2_jdesc *jd, u64 blkno, unsigned int where)
{
	struct list_head *head = &jd->jd_revoke_list;
	struct gfs2_revoke_replay *rr;
	int found = 0;

	list_for_each_entry(rr, head, rr_list) {
		if (rr->rr_blkno == blkno) {
			found = 1;
			break;
		}
	}

	if (found) {
		rr->rr_where = where;
		return 0;
	}

	rr = kmalloc(sizeof(struct gfs2_revoke_replay), GFP_NOFS);
	if (!rr)
		return -ENOMEM;

	rr->rr_blkno = blkno;
	rr->rr_where = where;
	list_add(&rr->rr_list, head);

	return 1;
}

int gfs2_revoke_check(struct gfs2_jdesc *jd, u64 blkno, unsigned int where)
{
	struct gfs2_revoke_replay *rr;
	int wrap, a, b, revoke;
	int found = 0;

	list_for_each_entry(rr, &jd->jd_revoke_list, rr_list) {
		if (rr->rr_blkno == blkno) {
			found = 1;
			break;
		}
	}

	if (!found)
		return 0;

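	/* The journal is circular: the revoke covers @blkno for log
	 * positions strictly between the replay tail and the position the
	 * revoke was issued at, taking wrap-around past the end of the
	 * journal into account. */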
	wrap = (rr->rr_where < jd->jd_replay_tail);
	a = (jd->jd_replay_tail < where);
	b = (where < rr->rr_where);
	revoke = (wrap) ? (a || b) : (a && b);

	return revoke;
}

void gfs2_revoke_clean(struct gfs2_jdesc *jd)
{
	struct list_head *head = &jd->jd_revoke_list;
	struct gfs2_revoke_replay *rr;

	while (!list_empty(head)) {
		rr = list_first_entry(head, struct gfs2_revoke_replay, rr_list);
		list_del(&rr->rr_list);
		kfree(rr);
	}
}

int __get_log_header(struct gfs2_sbd *sdp, const struct gfs2_log_header *lh,
		     unsigned int blkno, struct gfs2_log_header_host *head)
{
	u32 hash, crc;

	if (lh->lh_header.mh_magic != cpu_to_be32(GFS2_MAGIC) ||
	    lh->lh_header.mh_type != cpu_to_be32(GFS2_METATYPE_LH) ||
	    (blkno && be32_to_cpu(lh->lh_blkno) != blkno))
		return 1;

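	/* The v1 hash covers the v1 header with the lh_hash field itself
	 * treated as zero; crc32_le_shift() folds in those four zero bytes
	 * without touching the buffer. */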
	hash = crc32(~0, lh, LH_V1_SIZE - 4);
	hash = ~crc32_le_shift(hash, 4); /* assume lh_hash is zero */

	if (be32_to_cpu(lh->lh_hash) != hash)
		return 1;

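	/* The newer checksum (lh_crc) is a crc32c over the rest of the log
	 * header block, skipping the v1 header and the lh_crc field itself.
	 * A zero lh_crc means no v2 checksum was written, so only the v1
	 * hash above is checked in that case. */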
	crc = crc32c(~0, (void *)lh + LH_V1_SIZE + 4,
		     sdp->sd_sb.sb_bsize - LH_V1_SIZE - 4);

	if ((lh->lh_crc != 0 && be32_to_cpu(lh->lh_crc) != crc))
		return 1;

	head->lh_sequence = be64_to_cpu(lh->lh_sequence);
	head->lh_flags = be32_to_cpu(lh->lh_flags);
	head->lh_tail = be32_to_cpu(lh->lh_tail);
	head->lh_blkno = be32_to_cpu(lh->lh_blkno);

	head->lh_local_total = be64_to_cpu(lh->lh_local_total);
	head->lh_local_free = be64_to_cpu(lh->lh_local_free);
	head->lh_local_dinodes = be64_to_cpu(lh->lh_local_dinodes);

	return 0;
}

/**
 * get_log_header - read the log header for a given segment
 * @jd: the journal
 * @blk: the block to look at
 * @head: the log header to return
 *
 * Read the log header for a given segment in a given journal.  Do a few
 * sanity checks on it.
 *
 * Returns: 0 on success,
 *          1 if the header was invalid or incomplete,
 *          errno on error
 */

static int get_log_header(struct gfs2_jdesc *jd, unsigned int blk,
			  struct gfs2_log_header_host *head)
{
	struct gfs2_sbd *sdp = GFS2_SB(jd->jd_inode);
	struct buffer_head *bh;
	int error;

	error = gfs2_replay_read_block(jd, blk, &bh);
	if (error)
		return error;

	error = __get_log_header(sdp, (const struct gfs2_log_header *)bh->b_data,
				 blk, head);
	brelse(bh);

	return error;
}

/**
 * foreach_descriptor - go through the active part of the log
 * @jd: the journal
 * @start: the first log header in the active region
 * @end: the last log header (don't process the contents of this entry)
 * @pass: which of the two replay passes this is
 *
 * Scan each log descriptor in the active portion of the log and hand it
 * to the log operations code for the given pass.
 *
 * Returns: errno
 */

static int foreach_descriptor(struct gfs2_jdesc *jd, u32 start,
			      unsigned int end, int pass)
{
	struct gfs2_sbd *sdp = GFS2_SB(jd->jd_inode);
	struct buffer_head *bh;
	struct gfs2_log_descriptor *ld;
	int error = 0;
	u32 length;
	__be64 *ptr;
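	/* Each descriptor block carries an array of 64-bit entries right
	 * after the descriptor header; round the offset up so ptr lands on
	 * that 8-byte-aligned array. */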
	unsigned int offset = sizeof(struct gfs2_log_descriptor);
	offset += sizeof(__be64) - 1;
	offset &= ~(sizeof(__be64) - 1);

	while (start != end) {
		error = gfs2_replay_read_block(jd, start, &bh);
		if (error)
			return error;
		if (gfs2_meta_check(sdp, bh)) {
			brelse(bh);
			return -EIO;
		}
		ld = (struct gfs2_log_descriptor *)bh->b_data;
		length = be32_to_cpu(ld->ld_length);

		if (be32_to_cpu(ld->ld_header.mh_type) == GFS2_METATYPE_LH) {
			struct gfs2_log_header_host lh;
			error = get_log_header(jd, start, &lh);
			if (!error) {
				gfs2_replay_incr_blk(jd, &start);
				brelse(bh);
				continue;
			}
			if (error == 1) {
				gfs2_consist_inode(GFS2_I(jd->jd_inode));
				error = -EIO;
			}
			brelse(bh);
			return error;
		} else if (gfs2_metatype_check(sdp, bh, GFS2_METATYPE_LD)) {
			brelse(bh);
			return -EIO;
		}
		ptr = (__be64 *)(bh->b_data + offset);
		error = lops_scan_elements(jd, start, ld, ptr, pass);
		if (error) {
			brelse(bh);
			return error;
		}

		while (length--)
			gfs2_replay_incr_blk(jd, &start);

		brelse(bh);
	}

	return 0;
}

/**
 * clean_journal - mark a dirty journal as being clean
 * @jd: the journal
 * @head: the journal head to start from
 */

static void clean_journal(struct gfs2_jdesc *jd,
			  struct gfs2_log_header_host *head)
{
	struct gfs2_sbd *sdp = GFS2_SB(jd->jd_inode);
	u32 lblock = head->lh_blkno;

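	/* Write an unmount/recovery log header into the block just past the
	 * journal head so the journal reads as clean and its contents are
	 * never replayed again. */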
	gfs2_replay_incr_blk(jd, &lblock);
	gfs2_write_log_header(sdp, jd, head->lh_sequence + 1, 0, lblock,
			      GFS2_LOG_HEAD_UNMOUNT | GFS2_LOG_HEAD_RECOVERY,
			      REQ_PREFLUSH | REQ_FUA | REQ_META | REQ_SYNC);
	if (jd->jd_jid == sdp->sd_lockstruct.ls_jid) {
		sdp->sd_log_flush_head = lblock;
		gfs2_log_incr_head(sdp);
	}
}

static void gfs2_recovery_done(struct gfs2_sbd *sdp, unsigned int jid,
			       unsigned int message)
{
	char env_jid[20];
	char env_status[20];
	char *envp[] = { env_jid, env_status, NULL };
	struct lm_lockstruct *ls = &sdp->sd_lockstruct;

	ls->ls_recover_jid_done = jid;
	ls->ls_recover_jid_status = message;
	sprintf(env_jid, "JID=%u", jid);
	sprintf(env_status, "RECOVERY=%s",
		message == LM_RD_SUCCESS ? "Done" : "Failed");
	kobject_uevent_env(&sdp->sd_kobj, KOBJ_CHANGE, envp);

	if (sdp->sd_lockstruct.ls_ops->lm_recovery_result)
		sdp->sd_lockstruct.ls_ops->lm_recovery_result(sdp, jid, message);
}

/**
 * update_statfs_inode - Update the master statfs inode or zero out the local
 *			 statfs inode for a given journal.
 * @jd: The journal
 * @head: If NULL, @inode is the local statfs inode and we need to zero it out.
 *	  Otherwise, @head contains the statfs change info that needs to be
 *	  synced to the master statfs inode (pointed to by @inode).
 * @inode: statfs inode to update.
 */
static int update_statfs_inode(struct gfs2_jdesc *jd,
			       struct gfs2_log_header_host *head,
			       struct inode *inode)
{
	struct gfs2_sbd *sdp = GFS2_SB(jd->jd_inode);
	struct gfs2_inode *ip;
	struct buffer_head *bh;
	struct gfs2_statfs_change_host sc;
	int error = 0;

	BUG_ON(!inode);
	ip = GFS2_I(inode);

	error = gfs2_meta_inode_buffer(ip, &bh);
	if (error)
		goto out;

	spin_lock(&sdp->sd_statfs_spin);

	if (head) { /* Update the master statfs inode */
		gfs2_statfs_change_in(&sc, bh->b_data + sizeof(struct gfs2_dinode));
		sc.sc_total += head->lh_local_total;
		sc.sc_free += head->lh_local_free;
		sc.sc_dinodes += head->lh_local_dinodes;
		gfs2_statfs_change_out(&sc, bh->b_data + sizeof(struct gfs2_dinode));

		fs_info(sdp, "jid=%u: Updated master statfs Total:%lld, "
			"Free:%lld, Dinodes:%lld after change "
			"[%+lld,%+lld,%+lld]\n", jd->jd_jid, sc.sc_total,
			sc.sc_free, sc.sc_dinodes, head->lh_local_total,
			head->lh_local_free, head->lh_local_dinodes);
	} else { /* Zero out the local statfs inode */
		memset(bh->b_data + sizeof(struct gfs2_dinode), 0,
		       sizeof(struct gfs2_statfs_change));
		/* If it's our own journal, reset any in-memory changes too */
		if (jd->jd_jid == sdp->sd_lockstruct.ls_jid) {
			memset(&sdp->sd_statfs_local, 0,
			       sizeof(struct gfs2_statfs_change_host));
		}
	}
	spin_unlock(&sdp->sd_statfs_spin);

	mark_buffer_dirty(bh);
	brelse(bh);
	gfs2_inode_metasync(ip->i_gl);

out:
	return error;
}

/**
 * recover_local_statfs - Update the master and local statfs changes for this
 *			  journal.
 * @jd: the journal
 * @head: the journal head
 *
 * Previously, statfs updates would be read in from the local statfs inode and
 * synced to the master statfs inode during recovery.
 *
 * We now use the statfs updates in the journal head to update the master statfs
 * inode instead of reading them in from the local statfs inode. To preserve
 * backward compatibility with kernels that can't do this, we still need to keep
 * the local statfs inode up to date by writing changes to it. At some point in
 * the future, we can do away with the local statfs inodes altogether and keep
 * the statfs changes solely in the journal.
 */
static void recover_local_statfs(struct gfs2_jdesc *jd,
				 struct gfs2_log_header_host *head)
{
	int error;
	struct gfs2_sbd *sdp = GFS2_SB(jd->jd_inode);

	if (!head->lh_local_total && !head->lh_local_free
	    && !head->lh_local_dinodes) /* No change */
		goto zero_local;

	/* First update the master statfs inode with the changes we
	 * found in the journal. */
	error = update_statfs_inode(jd, head, sdp->sd_statfs_inode);
	if (error)
		goto out;

zero_local:
	/* Zero out the local statfs inode so any changes in there
	 * are not re-recovered. */
	error = update_statfs_inode(jd, NULL,
				    find_local_statfs_inode(sdp, jd->jd_jid));
out:
	return;
}

void gfs2_recover_func(struct work_struct *work)
{
	struct gfs2_jdesc *jd = container_of(work, struct gfs2_jdesc, jd_work);
	struct gfs2_inode *ip = GFS2_I(jd->jd_inode);
	struct gfs2_sbd *sdp = GFS2_SB(jd->jd_inode);
	struct gfs2_log_header_host head;
	struct gfs2_holder j_gh, ji_gh, thaw_gh;
	ktime_t t_start, t_jlck, t_jhd, t_tlck, t_rep;
	int ro = 0;
	unsigned int pass;
	int error = 0;
	int jlocked = 0;

	if (gfs2_withdrawn(sdp)) {
		fs_err(sdp, "jid=%u: Recovery not attempted due to withdraw.\n",
		       jd->jd_jid);
		goto fail;
	}
	t_start = ktime_get();
	if (sdp->sd_args.ar_spectator)
		goto fail;
	if (jd->jd_jid != sdp->sd_lockstruct.ls_jid) {
		fs_info(sdp, "jid=%u: Trying to acquire journal lock...\n",
			jd->jd_jid);
		jlocked = 1;
		/* Acquire the journal lock so we can do recovery */

		error = gfs2_glock_nq_num(sdp, jd->jd_jid, &gfs2_journal_glops,
					  LM_ST_EXCLUSIVE,
					  LM_FLAG_NOEXP | LM_FLAG_TRY | GL_NOCACHE,
					  &j_gh);
		switch (error) {
		case 0:
			break;

		case GLR_TRYFAILED:
			fs_info(sdp, "jid=%u: Busy\n", jd->jd_jid);
			error = 0;
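			/* fall through: the journal is busy (locked elsewhere),
			 * so give up without recording an error */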

		default:
			goto fail;
		}

		error = gfs2_glock_nq_init(ip->i_gl, LM_ST_SHARED,
					   LM_FLAG_NOEXP | GL_NOCACHE, &ji_gh);
		if (error)
			goto fail_gunlock_j;
	} else {
		fs_info(sdp, "jid=%u, already locked for use\n", jd->jd_jid);
	}

	t_jlck = ktime_get();
	fs_info(sdp, "jid=%u: Looking at journal...\n", jd->jd_jid);

	error = gfs2_jdesc_check(jd);
	if (error)
		goto fail_gunlock_ji;

	error = gfs2_find_jhead(jd, &head, true);
	if (error)
		goto fail_gunlock_ji;
	t_jhd = ktime_get();
	fs_info(sdp, "jid=%u: Journal head lookup took %lldms\n", jd->jd_jid,
		ktime_ms_delta(t_jhd, t_jlck));

	if (!(head.lh_flags & GFS2_LOG_HEAD_UNMOUNT)) {
		fs_info(sdp, "jid=%u: Acquiring the transaction lock...\n",
			jd->jd_jid);

		/* Acquire a shared hold on the freeze lock */

		error = gfs2_freeze_lock(sdp, &thaw_gh, LM_FLAG_PRIORITY);
		if (error)
			goto fail_gunlock_ji;

		if (test_bit(SDF_RORECOVERY, &sdp->sd_flags)) {
			ro = 1;
		} else if (test_bit(SDF_JOURNAL_CHECKED, &sdp->sd_flags)) {
			if (!test_bit(SDF_JOURNAL_LIVE, &sdp->sd_flags))
				ro = 1;
		} else {
			if (sb_rdonly(sdp->sd_vfs)) {
				/* check if device itself is read-only */
				ro = bdev_read_only(sdp->sd_vfs->s_bdev);
				if (!ro) {
					fs_info(sdp, "recovery required on "
						"read-only filesystem.\n");
					fs_info(sdp, "write access will be "
						"enabled during recovery.\n");
				}
			}
		}

		if (ro) {
			fs_warn(sdp, "jid=%u: Can't replay: read-only block "
				"device\n", jd->jd_jid);
			error = -EROFS;
			goto fail_gunlock_thaw;
		}

		t_tlck = ktime_get();
		fs_info(sdp, "jid=%u: Replaying journal...0x%x to 0x%x\n",
			jd->jd_jid, head.lh_tail, head.lh_blkno);

		/* We take the sd_log_flush_lock here primarily to prevent log
		 * flushes and simultaneous journal replays from stomping on
		 * each other wrt sd_log_bio. */
		down_read(&sdp->sd_log_flush_lock);
		for (pass = 0; pass < 2; pass++) {
			lops_before_scan(jd, &head, pass);
			error = foreach_descriptor(jd, head.lh_tail,
						   head.lh_blkno, pass);
			lops_after_scan(jd, error, pass);
			if (error) {
				up_read(&sdp->sd_log_flush_lock);
				goto fail_gunlock_thaw;
			}
		}

		recover_local_statfs(jd, &head);
		clean_journal(jd, &head);
		up_read(&sdp->sd_log_flush_lock);

		gfs2_freeze_unlock(&thaw_gh);
		t_rep = ktime_get();
		fs_info(sdp, "jid=%u: Journal replayed in %lldms [jlck:%lldms, "
			"jhead:%lldms, tlck:%lldms, replay:%lldms]\n",
			jd->jd_jid, ktime_ms_delta(t_rep, t_start),
			ktime_ms_delta(t_jlck, t_start),
			ktime_ms_delta(t_jhd, t_jlck),
			ktime_ms_delta(t_tlck, t_jhd),
			ktime_ms_delta(t_rep, t_tlck));
	}

	gfs2_recovery_done(sdp, jd->jd_jid, LM_RD_SUCCESS);

	if (jlocked) {
		gfs2_glock_dq_uninit(&ji_gh);
		gfs2_glock_dq_uninit(&j_gh);
	}

	fs_info(sdp, "jid=%u: Done\n", jd->jd_jid);
	goto done;

fail_gunlock_thaw:
	gfs2_freeze_unlock(&thaw_gh);
fail_gunlock_ji:
	if (jlocked) {
		gfs2_glock_dq_uninit(&ji_gh);
fail_gunlock_j:
		gfs2_glock_dq_uninit(&j_gh);
	}

	fs_info(sdp, "jid=%u: %s\n", jd->jd_jid, (error) ? "Failed" : "Done");
fail:
	jd->jd_recover_error = error;
	gfs2_recovery_done(sdp, jd->jd_jid, LM_RD_GAVEUP);
done:
	clear_bit(JDF_RECOVERY, &jd->jd_flags);
	smp_mb__after_atomic();
	wake_up_bit(&jd->jd_flags, JDF_RECOVERY);
}

int gfs2_recover_journal(struct gfs2_jdesc *jd, bool wait)
{
	int rv;

	if (test_and_set_bit(JDF_RECOVERY, &jd->jd_flags))
		return -EBUSY;

	/* we have JDF_RECOVERY, queue should always succeed */
	rv = queue_work(gfs_recovery_wq, &jd->jd_work);
	BUG_ON(!rv);

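	/* JDF_RECOVERY is cleared (and this bit woken) by gfs2_recover_func()
	 * once recovery of this journal has finished */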
	if (wait)
		wait_on_bit(&jd->jd_flags, JDF_RECOVERY,
			    TASK_UNINTERRUPTIBLE);

	return wait ? jd->jd_recover_error : 0;
}