// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) Sistina Software, Inc.  1997-2003 All rights reserved.
 * Copyright (C) 2004-2008 Red Hat, Inc.  All rights reserved.
 */

#include <linux/spinlock.h>
#include <linux/completion.h>
#include <linux/buffer_head.h>
#include <linux/gfs2_ondisk.h>
#include <linux/bio.h>
#include <linux/posix_acl.h>
#include <linux/security.h>

#include "gfs2.h"
#include "incore.h"
#include "bmap.h"
#include "glock.h"
#include "glops.h"
#include "inode.h"
#include "log.h"
#include "meta_io.h"
#include "recovery.h"
#include "rgrp.h"
#include "util.h"
#include "trans.h"
#include "dir.h"
#include "lops.h"

struct workqueue_struct *gfs2_freeze_wq;

extern struct workqueue_struct *gfs2_control_wq;

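/*
 * gfs2_ail_error - complain about an AIL buffer in an unexpected state
 *
 * Log the details of the offending buffer and its glock, then withdraw
 * from the filesystem: a buffer that is still dirty, pinned, or locked on
 * the AIL list at this point indicates a journaling inconsistency that we
 * presumably cannot safely continue from.
 */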
static void gfs2_ail_error(struct gfs2_glock *gl, const struct buffer_head *bh)
{
	fs_err(gl->gl_name.ln_sbd,
	       "AIL buffer %p: blocknr %llu state 0x%08lx mapping %p page "
	       "state 0x%lx\n",
	       bh, (unsigned long long)bh->b_blocknr, bh->b_state,
	       bh->b_page->mapping, bh->b_page->flags);
	fs_err(gl->gl_name.ln_sbd, "AIL glock %u:%llu mapping %p\n",
	       gl->gl_name.ln_type, gl->gl_name.ln_number,
	       gfs2_glock2aspace(gl));
	gfs2_lm(gl->gl_name.ln_sbd, "AIL error\n");
	gfs2_withdraw(gl->gl_name.ln_sbd);
}

/**
 * __gfs2_ail_flush - remove all buffers for a given lock from the AIL
 * @gl: the glock
 * @fsync: set when called from fsync (not all buffers will be clean)
 * @nr_revokes: the maximum number of buffers to revoke
 *
 * None of the buffers should be dirty, locked, or pinned.
 */

static void __gfs2_ail_flush(struct gfs2_glock *gl, bool fsync,
			     unsigned int nr_revokes)
{
	struct gfs2_sbd *sdp = gl->gl_name.ln_sbd;
	struct list_head *head = &gl->gl_ail_list;
	struct gfs2_bufdata *bd, *tmp;
	struct buffer_head *bh;
	const unsigned long b_state = (1UL << BH_Dirty)|(1UL << BH_Pinned)|(1UL << BH_Lock);

	gfs2_log_lock(sdp);
	spin_lock(&sdp->sd_ail_lock);
	list_for_each_entry_safe_reverse(bd, tmp, head, bd_ail_gl_list) {
		if (nr_revokes == 0)
			break;
		bh = bd->bd_bh;
		if (bh->b_state & b_state) {
			if (fsync)
				continue;
			gfs2_ail_error(gl, bh);
		}
		gfs2_trans_add_revoke(sdp, bd);
		nr_revokes--;
	}
	GLOCK_BUG_ON(gl, !fsync && atomic_read(&gl->gl_ail_count));
	spin_unlock(&sdp->sd_ail_lock);
	gfs2_log_unlock(sdp);
}
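/**
 * gfs2_ail_empty_gl - remove all buffers for a given lock from the AIL
 * @gl: the glock
 *
 * Reserves log space for one revoke per buffer on the glock's AIL list
 * (using an on-stack transaction rather than gfs2_trans_begin()), issues
 * the revokes, and flushes the log so that they reach the journal.
 *
 * Returns: errno
 */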
static int gfs2_ail_empty_gl(struct gfs2_glock *gl)
{
	struct gfs2_sbd *sdp = gl->gl_name.ln_sbd;
	struct gfs2_trans tr;
	int ret;

	memset(&tr, 0, sizeof(tr));
	INIT_LIST_HEAD(&tr.tr_buf);
	INIT_LIST_HEAD(&tr.tr_databuf);
	INIT_LIST_HEAD(&tr.tr_ail1_list);
	INIT_LIST_HEAD(&tr.tr_ail2_list);
	tr.tr_revokes = atomic_read(&gl->gl_ail_count);

	if (!tr.tr_revokes) {
		bool have_revokes;
		bool log_in_flight;

		/*
		 * We have nothing on the AIL, but there could be revokes on
		 * the sdp revoke queue, in which case we still want to flush
		 * the log and wait for it to finish.
		 *
		 * If the sdp revoke list is empty too, we might still have
		 * I/O outstanding for writing revokes, so we should wait for
		 * it before returning.
		 *
		 * If none of these conditions are true, our revokes are all
		 * flushed and we can return.
		 */
		gfs2_log_lock(sdp);
		have_revokes = !list_empty(&sdp->sd_log_revokes);
		log_in_flight = atomic_read(&sdp->sd_log_in_flight);
		gfs2_log_unlock(sdp);
		if (have_revokes)
			goto flush;
		if (log_in_flight)
			log_flush_wait(sdp);
		return 0;
	}

	/* A shortened, inline version of gfs2_trans_begin();
	 * tr->alloced is not set since the transaction structure is
	 * on the stack */
	tr.tr_reserved = 1 + gfs2_struct2blk(sdp, tr.tr_revokes);
	tr.tr_ip = _RET_IP_;
	ret = gfs2_log_reserve(sdp, tr.tr_reserved);
	if (ret < 0)
		return ret;
	WARN_ON_ONCE(current->journal_info);
	current->journal_info = &tr;

	__gfs2_ail_flush(gl, false, tr.tr_revokes);

	gfs2_trans_end(sdp);
flush:
	gfs2_log_flush(sdp, NULL, GFS2_LOG_HEAD_FLUSH_NORMAL |
		       GFS2_LFC_AIL_EMPTY_GL);
	return 0;
}

void gfs2_ail_flush(struct gfs2_glock *gl, bool fsync)
{
	struct gfs2_sbd *sdp = gl->gl_name.ln_sbd;
	unsigned int revokes = atomic_read(&gl->gl_ail_count);
	unsigned int max_revokes = (sdp->sd_sb.sb_bsize - sizeof(struct gfs2_log_descriptor)) / sizeof(u64);
	int ret;

	if (!revokes)
		return;

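	/*
	 * Revokes are written out in log descriptor blocks: the first block
	 * holds (bsize - sizeof(struct gfs2_log_descriptor)) / sizeof(u64)
	 * revokes, and each continuation block holds
	 * (bsize - sizeof(struct gfs2_meta_header)) / sizeof(u64) more.
	 * Grow max_revokes in whole continuation blocks until it covers
	 * everything on the AIL list, so that the reservation made by
	 * gfs2_trans_begin() below is large enough.
	 */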
	while (revokes > max_revokes)
		max_revokes += (sdp->sd_sb.sb_bsize - sizeof(struct gfs2_meta_header)) / sizeof(u64);

	ret = gfs2_trans_begin(sdp, 0, max_revokes);
	if (ret)
		return;
	__gfs2_ail_flush(gl, fsync, max_revokes);
	gfs2_trans_end(sdp);
	gfs2_log_flush(sdp, NULL, GFS2_LOG_HEAD_FLUSH_NORMAL |
		       GFS2_LFC_AIL_FLUSH);
}

/**
 * gfs2_rgrp_metasync - sync out the metadata of a resource group
 * @gl: the glock protecting the resource group
 *
 * Returns: errno
 */

static int gfs2_rgrp_metasync(struct gfs2_glock *gl)
{
	struct gfs2_sbd *sdp = gl->gl_name.ln_sbd;
	struct address_space *metamapping = &sdp->sd_aspace;
	struct gfs2_rgrpd *rgd = gfs2_glock2rgrp(gl);
	const unsigned bsize = sdp->sd_sb.sb_bsize;
	loff_t start = (rgd->rd_addr * bsize) & PAGE_MASK;
	loff_t end = PAGE_ALIGN((rgd->rd_addr + rgd->rd_length) * bsize) - 1;
	int error;

	filemap_fdatawrite_range(metamapping, start, end);
	error = filemap_fdatawait_range(metamapping, start, end);
	WARN_ON_ONCE(error && !gfs2_withdrawn(sdp));
	mapping_set_error(metamapping, error);
	if (error)
		gfs2_io_error(sdp);
	return error;
}

/**
 * rgrp_go_sync - sync out the metadata for this glock
 * @gl: the glock
 *
 * Called when demoting or unlocking an EX glock. We must flush
 * to disk all dirty buffers/pages relating to this glock, and must not
 * return to the caller to demote/unlock the glock until I/O is complete.
 */

static int rgrp_go_sync(struct gfs2_glock *gl)
{
	struct gfs2_sbd *sdp = gl->gl_name.ln_sbd;
	struct gfs2_rgrpd *rgd = gfs2_glock2rgrp(gl);
	int error;

	if (!test_and_clear_bit(GLF_DIRTY, &gl->gl_flags))
		return 0;
	GLOCK_BUG_ON(gl, gl->gl_state != LM_ST_EXCLUSIVE);

	gfs2_log_flush(sdp, gl, GFS2_LOG_HEAD_FLUSH_NORMAL |
		       GFS2_LFC_RGRP_GO_SYNC);
	error = gfs2_rgrp_metasync(gl);
	if (!error)
		error = gfs2_ail_empty_gl(gl);
	gfs2_free_clones(rgd);
	return error;
}

/**
 * rgrp_go_inval - invalidate the metadata for this glock
 * @gl: the glock
 * @flags:
 *
 * We never use LM_ST_DEFERRED with resource groups, so we should
 * always see the metadata flag set here.
 *
 */

static void rgrp_go_inval(struct gfs2_glock *gl, int flags)
{
	struct gfs2_sbd *sdp = gl->gl_name.ln_sbd;
	struct address_space *mapping = &sdp->sd_aspace;
	struct gfs2_rgrpd *rgd = gfs2_glock2rgrp(gl);
	const unsigned bsize = sdp->sd_sb.sb_bsize;
	loff_t start = (rgd->rd_addr * bsize) & PAGE_MASK;
	loff_t end = PAGE_ALIGN((rgd->rd_addr + rgd->rd_length) * bsize) - 1;

	gfs2_rgrp_brelse(rgd);
	WARN_ON_ONCE(!(flags & DIO_METADATA));
	truncate_inode_pages_range(mapping, start, end);
	rgd->rd_flags &= ~GFS2_RDF_UPTODATE;
}

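/* Pass a glock dump request through to gfs2_rgrp_dump(), provided the
 * glock still has a resource group attached. */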
static void gfs2_rgrp_go_dump(struct seq_file *seq, struct gfs2_glock *gl,
			      const char *fs_id_buf)
{
	struct gfs2_rgrpd *rgd = gl->gl_object;

	if (rgd)
		gfs2_rgrp_dump(seq, rgd, fs_id_buf);
}

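/*
 * gfs2_glock2inode - safely dereference a glock's inode
 *
 * gl_object is read under gl_lockref.lock so it cannot be cleared under
 * us, and GIF_GLOP_PENDING is set to mark a glock operation in progress
 * on the inode. Callers balance this with gfs2_clear_glop_pending() once
 * the operation is done.
 */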
static struct gfs2_inode *gfs2_glock2inode(struct gfs2_glock *gl)
{
	struct gfs2_inode *ip;

	spin_lock(&gl->gl_lockref.lock);
	ip = gl->gl_object;
	if (ip)
		set_bit(GIF_GLOP_PENDING, &ip->i_flags);
	spin_unlock(&gl->gl_lockref.lock);
	return ip;
}

struct gfs2_rgrpd *gfs2_glock2rgrp(struct gfs2_glock *gl)
{
	struct gfs2_rgrpd *rgd;

	spin_lock(&gl->gl_lockref.lock);
	rgd = gl->gl_object;
	spin_unlock(&gl->gl_lockref.lock);

	return rgd;
}

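/* The counterpart of gfs2_glock2inode(): mark the glock operation on the
 * inode as complete and wake up anyone waiting on GIF_GLOP_PENDING. */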
static void gfs2_clear_glop_pending(struct gfs2_inode *ip)
{
	if (!ip)
		return;

	clear_bit_unlock(GIF_GLOP_PENDING, &ip->i_flags);
	wake_up_bit(&ip->i_flags, GIF_GLOP_PENDING);
}

/**
 * gfs2_inode_metasync - sync out the metadata of an inode
 * @gl: the glock protecting the inode
 *
 * Returns: errno
 */
int gfs2_inode_metasync(struct gfs2_glock *gl)
{
	struct address_space *metamapping = gfs2_glock2aspace(gl);
	int error;

	filemap_fdatawrite(metamapping);
	error = filemap_fdatawait(metamapping);
	if (error)
		gfs2_io_error(gl->gl_name.ln_sbd);
	return error;
}

/**
 * inode_go_sync - Sync the dirty metadata of an inode
 * @gl: the glock protecting the inode
 *
 * Returns: errno
 */

static int inode_go_sync(struct gfs2_glock *gl)
{
	struct gfs2_inode *ip = gfs2_glock2inode(gl);
	int isreg = ip && S_ISREG(ip->i_inode.i_mode);
	struct address_space *metamapping = gfs2_glock2aspace(gl);
	int error = 0, ret;

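	/*
	 * For regular files, unmap any writable shared mappings first
	 * (GIF_SW_PAGED is presumably set when such a mapping takes a
	 * write fault), so that further writes must refault, and wait for
	 * any direct I/O in flight before starting writeback.
	 */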
	if (isreg) {
		if (test_and_clear_bit(GIF_SW_PAGED, &ip->i_flags))
			unmap_shared_mapping_range(ip->i_inode.i_mapping, 0, 0);
		inode_dio_wait(&ip->i_inode);
	}
	if (!test_and_clear_bit(GLF_DIRTY, &gl->gl_flags))
		goto out;

	GLOCK_BUG_ON(gl, gl->gl_state != LM_ST_EXCLUSIVE);

	gfs2_log_flush(gl->gl_name.ln_sbd, gl, GFS2_LOG_HEAD_FLUSH_NORMAL |
		       GFS2_LFC_INODE_GO_SYNC);
	filemap_fdatawrite(metamapping);
	if (isreg) {
		struct address_space *mapping = ip->i_inode.i_mapping;
		filemap_fdatawrite(mapping);
		error = filemap_fdatawait(mapping);
		mapping_set_error(mapping, error);
	}
	ret = gfs2_inode_metasync(gl);
	if (!error)
		error = ret;
	gfs2_ail_empty_gl(gl);
	/*
	 * Writeback of the data mapping may cause the dirty flag to be set
	 * so we have to clear it again here.
	 */
	smp_mb__before_atomic();
	clear_bit(GLF_DIRTY, &gl->gl_flags);

out:
	gfs2_clear_glop_pending(ip);
	return error;
}

/**
 * inode_go_inval - prepare an inode glock to be released
 * @gl: the glock
 * @flags:
 *
 * Normally we invalidate everything, but if we are moving into
 * LM_ST_DEFERRED from LM_ST_SHARED or LM_ST_EXCLUSIVE then we
 * can keep hold of the metadata, since it won't have changed.
 *
 */

static void inode_go_inval(struct gfs2_glock *gl, int flags)
{
	struct gfs2_inode *ip = gfs2_glock2inode(gl);

	if (flags & DIO_METADATA) {
		struct address_space *mapping = gfs2_glock2aspace(gl);
		truncate_inode_pages(mapping, 0);
		if (ip) {
			set_bit(GIF_INVALID, &ip->i_flags);
			forget_all_cached_acls(&ip->i_inode);
			security_inode_invalidate_secctx(&ip->i_inode);
			gfs2_dir_hash_inval(ip);
		}
	}

	if (ip == GFS2_I(gl->gl_name.ln_sbd->sd_rindex)) {
		gfs2_log_flush(gl->gl_name.ln_sbd, NULL,
			       GFS2_LOG_HEAD_FLUSH_NORMAL |
			       GFS2_LFC_INODE_GO_INVAL);
		gl->gl_name.ln_sbd->sd_rindex_uptodate = 0;
	}
	if (ip && S_ISREG(ip->i_inode.i_mode))
		truncate_inode_pages(ip->i_inode.i_mapping, 0);

	gfs2_clear_glop_pending(ip);
}

/**
 * inode_go_demote_ok - Check to see if it's ok to unlock an inode glock
 * @gl: the glock
 *
 * Returns: 1 if it's ok
 */

static int inode_go_demote_ok(const struct gfs2_glock *gl)
{
	struct gfs2_sbd *sdp = gl->gl_name.ln_sbd;

	if (sdp->sd_jindex == gl->gl_object || sdp->sd_rindex == gl->gl_object)
		return 0;

	return 1;
}

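/**
 * gfs2_dinode_in - populate the incore inode from an on-disk dinode
 * @ip: the incore inode
 * @buf: the buffer containing the on-disk struct gfs2_dinode
 *
 * Returns: 0 on success, or -EIO if the dinode fails sanity checks
 * (block number mismatch, or an out-of-range metadata tree height or
 * directory depth).
 */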
static int gfs2_dinode_in(struct gfs2_inode *ip, const void *buf)
{
	const struct gfs2_dinode *str = buf;
	struct timespec64 atime;
	u16 height, depth;

	if (unlikely(ip->i_no_addr != be64_to_cpu(str->di_num.no_addr)))
		goto corrupt;
	ip->i_no_formal_ino = be64_to_cpu(str->di_num.no_formal_ino);
	ip->i_inode.i_mode = be32_to_cpu(str->di_mode);
	ip->i_inode.i_rdev = 0;
	switch (ip->i_inode.i_mode & S_IFMT) {
	case S_IFBLK:
	case S_IFCHR:
		ip->i_inode.i_rdev = MKDEV(be32_to_cpu(str->di_major),
					   be32_to_cpu(str->di_minor));
		break;
	}

	i_uid_write(&ip->i_inode, be32_to_cpu(str->di_uid));
	i_gid_write(&ip->i_inode, be32_to_cpu(str->di_gid));
	set_nlink(&ip->i_inode, be32_to_cpu(str->di_nlink));
	i_size_write(&ip->i_inode, be64_to_cpu(str->di_size));
	gfs2_set_inode_blocks(&ip->i_inode, be64_to_cpu(str->di_blocks));
	atime.tv_sec = be64_to_cpu(str->di_atime);
	atime.tv_nsec = be32_to_cpu(str->di_atime_nsec);
	if (timespec64_compare(&ip->i_inode.i_atime, &atime) < 0)
		ip->i_inode.i_atime = atime;
	ip->i_inode.i_mtime.tv_sec = be64_to_cpu(str->di_mtime);
	ip->i_inode.i_mtime.tv_nsec = be32_to_cpu(str->di_mtime_nsec);
	ip->i_inode.i_ctime.tv_sec = be64_to_cpu(str->di_ctime);
	ip->i_inode.i_ctime.tv_nsec = be32_to_cpu(str->di_ctime_nsec);

	ip->i_goal = be64_to_cpu(str->di_goal_meta);
	ip->i_generation = be64_to_cpu(str->di_generation);

	ip->i_diskflags = be32_to_cpu(str->di_flags);
	ip->i_eattr = be64_to_cpu(str->di_eattr);
	/* i_diskflags and i_eattr must be set before gfs2_set_inode_flags() */
	gfs2_set_inode_flags(&ip->i_inode);
	height = be16_to_cpu(str->di_height);
	if (unlikely(height > GFS2_MAX_META_HEIGHT))
		goto corrupt;
	ip->i_height = (u8)height;

	depth = be16_to_cpu(str->di_depth);
	if (unlikely(depth > GFS2_DIR_MAX_DEPTH))
		goto corrupt;
	ip->i_depth = (u8)depth;
	ip->i_entries = be32_to_cpu(str->di_entries);

	if (S_ISREG(ip->i_inode.i_mode))
		gfs2_set_aops(&ip->i_inode);

	return 0;
corrupt:
	gfs2_consist_inode(ip);
	return -EIO;
}

/**
 * gfs2_inode_refresh - Refresh the incore copy of the dinode
 * @ip: The GFS2 inode
 *
 * Returns: errno
 */

int gfs2_inode_refresh(struct gfs2_inode *ip)
{
	struct buffer_head *dibh;
	int error;

	error = gfs2_meta_inode_buffer(ip, &dibh);
	if (error)
		return error;

	error = gfs2_dinode_in(ip, dibh->b_data);
	brelse(dibh);
	clear_bit(GIF_INVALID, &ip->i_flags);

	return error;
}

/**
 * inode_go_lock - operation done after an inode lock is locked by a process
 * @gh: the glock holder
 *
 * Returns: errno
 */

static int inode_go_lock(struct gfs2_holder *gh)
{
	struct gfs2_glock *gl = gh->gh_gl;
	struct gfs2_sbd *sdp = gl->gl_name.ln_sbd;
	struct gfs2_inode *ip = gl->gl_object;
	int error = 0;

	if (!ip || (gh->gh_flags & GL_SKIP))
		return 0;

	if (test_bit(GIF_INVALID, &ip->i_flags)) {
		error = gfs2_inode_refresh(ip);
		if (error)
			return error;
	}

	if (gh->gh_state != LM_ST_DEFERRED)
		inode_dio_wait(&ip->i_inode);

	if ((ip->i_diskflags & GFS2_DIF_TRUNC_IN_PROG) &&
	    (gl->gl_state == LM_ST_EXCLUSIVE) &&
	    (gh->gh_state == LM_ST_EXCLUSIVE)) {
		spin_lock(&sdp->sd_trunc_lock);
		if (list_empty(&ip->i_trunc_list))
			list_add(&ip->i_trunc_list, &sdp->sd_trunc_list);
		spin_unlock(&sdp->sd_trunc_lock);
		wake_up(&sdp->sd_quota_wait);
		return 1;
	}

	return error;
}

/**
 * inode_go_dump - print information about an inode
 * @seq: The iterator
 * @gl: The glock
 * @fs_id_buf: file system id (may be empty)
 *
 */

static void inode_go_dump(struct seq_file *seq, struct gfs2_glock *gl,
			  const char *fs_id_buf)
{
	struct gfs2_inode *ip = gl->gl_object;
	struct inode *inode = &ip->i_inode;
	unsigned long nrpages;

	if (ip == NULL)
		return;

	xa_lock_irq(&inode->i_data.i_pages);
	nrpages = inode->i_data.nrpages;
	xa_unlock_irq(&inode->i_data.i_pages);

	gfs2_print_dbg(seq, "%s I: n:%llu/%llu t:%u f:0x%02lx d:0x%08x s:%llu "
		       "p:%lu\n", fs_id_buf,
		       (unsigned long long)ip->i_no_formal_ino,
		       (unsigned long long)ip->i_no_addr,
		       IF2DT(ip->i_inode.i_mode), ip->i_flags,
		       (unsigned int)ip->i_diskflags,
		       (unsigned long long)i_size_read(inode), nrpages);
}

/**
 * freeze_go_sync - promote/demote the freeze glock
 * @gl: the glock
 */

static int freeze_go_sync(struct gfs2_glock *gl)
{
	int error = 0;
	struct gfs2_sbd *sdp = gl->gl_name.ln_sbd;

	/*
	 * We need to check gl_state == LM_ST_SHARED here and not gl_req ==
	 * LM_ST_EXCLUSIVE. That's because when any node does a freeze,
	 * all the nodes should have the freeze glock in SH mode and they all
	 * call do_xmote: One for EX and the others for UN. They ALL must
	 * freeze locally, and they ALL must queue freeze work. The freeze_work
	 * calls freeze_func, which tries to reacquire the freeze glock in SH,
	 * effectively waiting for the thaw on the node who holds it in EX.
	 * Once thawed, the work func acquires the freeze glock in
	 * SH and everybody goes back to thawed.
	 */
	if (gl->gl_state == LM_ST_SHARED && !gfs2_withdrawn(sdp) &&
	    !test_bit(SDF_NORECOVERY, &sdp->sd_flags)) {
		atomic_set(&sdp->sd_freeze_state, SFS_STARTING_FREEZE);
		error = freeze_super(sdp->sd_vfs);
		if (error) {
			fs_info(sdp, "GFS2: couldn't freeze filesystem: %d\n",
				error);
			if (gfs2_withdrawn(sdp)) {
				atomic_set(&sdp->sd_freeze_state, SFS_UNFROZEN);
				return 0;
			}
			gfs2_assert_withdraw(sdp, 0);
		}
		queue_work(gfs2_freeze_wq, &sdp->sd_freeze_work);
		if (test_bit(SDF_JOURNAL_LIVE, &sdp->sd_flags))
			gfs2_log_flush(sdp, NULL, GFS2_LOG_HEAD_FLUSH_FREEZE |
				       GFS2_LFC_FREEZE_GO_SYNC);
		else /* read-only mounts */
			atomic_set(&sdp->sd_freeze_state, SFS_FROZEN);
	}
	return 0;
}

/**
 * freeze_go_xmote_bh - After promoting/demoting the freeze glock
 * @gl: the glock
 * @gh: the glock holder
 */

static int freeze_go_xmote_bh(struct gfs2_glock *gl, struct gfs2_holder *gh)
{
	struct gfs2_sbd *sdp = gl->gl_name.ln_sbd;
	struct gfs2_inode *ip = GFS2_I(sdp->sd_jdesc->jd_inode);
	struct gfs2_glock *j_gl = ip->i_gl;
	struct gfs2_log_header_host head;
	int error;

	if (test_bit(SDF_JOURNAL_LIVE, &sdp->sd_flags)) {
		j_gl->gl_ops->go_inval(j_gl, DIO_METADATA);

		error = gfs2_find_jhead(sdp->sd_jdesc, &head, false);
		if (gfs2_assert_withdraw_delayed(sdp, !error))
			return error;
		if (gfs2_assert_withdraw_delayed(sdp, head.lh_flags &
						 GFS2_LOG_HEAD_UNMOUNT))
			return -EIO;
		sdp->sd_log_sequence = head.lh_sequence + 1;
		gfs2_log_pointers_init(sdp, head.lh_blkno);
	}
	return 0;
}

/**
 * freeze_go_demote_ok
 * @gl: the glock
 *
 * Always returns 0
 */

static int freeze_go_demote_ok(const struct gfs2_glock *gl)
{
	return 0;
}

/**
 * iopen_go_callback - schedule the dcache entry for the inode to be deleted
 * @gl: the glock
 * @remote: true if this came from a different cluster node
 *
 * gl->gl_lockref.lock is held while calling this
 */
static void iopen_go_callback(struct gfs2_glock *gl, bool remote)
{
	struct gfs2_inode *ip = gl->gl_object;
	struct gfs2_sbd *sdp = gl->gl_name.ln_sbd;

	if (!remote || sb_rdonly(sdp->sd_vfs))
		return;

	if (gl->gl_demote_state == LM_ST_UNLOCKED &&
	    gl->gl_state == LM_ST_SHARED && ip) {
		gl->gl_lockref.count++;
		if (!queue_delayed_work(gfs2_delete_workqueue,
					&gl->gl_delete, 0))
			gl->gl_lockref.count--;
	}
}

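/* An iopen glock is presumably kept around to coordinate deferred deletion
 * of its inode; don't allow a demote while delete work is still queued. */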
static int iopen_go_demote_ok(const struct gfs2_glock *gl)
{
	return !gfs2_delete_work_queued(gl);
}

/**
 * inode_go_free - wake up anyone waiting for dlm's unlock ast to free it
 * @gl: glock being freed
 *
 * For now, this is only used for the journal inode glock. In withdraw
 * situations, we need to wait for the glock to be freed so that we know
 * other nodes may proceed with recovery / journal replay.
 */
static void inode_go_free(struct gfs2_glock *gl)
{
	/* Note that we cannot reference gl_object because it's already set
	 * to NULL by this point in its lifecycle. */
	if (!test_bit(GLF_FREEING, &gl->gl_flags))
		return;
	clear_bit_unlock(GLF_FREEING, &gl->gl_flags);
	wake_up_bit(&gl->gl_flags, GLF_FREEING);
}

/**
 * nondisk_go_callback - used to signal when a node did a withdraw
 * @gl: the nondisk glock
 * @remote: true if this came from a different cluster node
 *
 */
static void nondisk_go_callback(struct gfs2_glock *gl, bool remote)
{
	struct gfs2_sbd *sdp = gl->gl_name.ln_sbd;

	/* Ignore the callback unless it's from another node, and it's the
	 * live lock. */
	if (!remote || gl->gl_name.ln_number != GFS2_LIVE_LOCK)
		return;

	/* First order of business is to cancel the demote request. We don't
	 * really want to demote a nondisk glock. At best it's just to inform
	 * us of another node's withdraw. We'll keep it in SH mode. */
	clear_bit(GLF_DEMOTE, &gl->gl_flags);
	clear_bit(GLF_PENDING_DEMOTE, &gl->gl_flags);

	/* Ignore the unlock if we're withdrawn, unmounting, or in recovery. */
	if (test_bit(SDF_NORECOVERY, &sdp->sd_flags) ||
	    test_bit(SDF_WITHDRAWN, &sdp->sd_flags) ||
	    test_bit(SDF_REMOTE_WITHDRAW, &sdp->sd_flags))
		return;

	/* We only care when a node wants us to unlock, because that means
	 * they want a journal recovered. */
	if (gl->gl_demote_state != LM_ST_UNLOCKED)
		return;

	if (sdp->sd_args.ar_spectator) {
		fs_warn(sdp, "Spectator node cannot recover journals.\n");
		return;
	}

	fs_warn(sdp, "Some node has withdrawn; checking for recovery.\n");
	set_bit(SDF_REMOTE_WITHDRAW, &sdp->sd_flags);
	/*
	 * We can't call remote_withdraw or gfs2_recover_journal directly
	 * here, because this is called from the glock unlock function and
	 * remote_withdraw needs to enqueue and dequeue the same "live"
	 * glock we were called from. So we queue it to the control work
	 * queue in lock_dlm.
	 */
	queue_delayed_work(gfs2_control_wq, &sdp->sd_control_work, 0);
}

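/*
 * The glock operation tables, one per lock type. Each type fills in only
 * the callbacks it needs; ops that are not set are simply skipped by the
 * glock core.
 */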
const struct gfs2_glock_operations gfs2_meta_glops = {
	.go_type = LM_TYPE_META,
	.go_flags = GLOF_NONDISK,
};

const struct gfs2_glock_operations gfs2_inode_glops = {
	.go_sync = inode_go_sync,
	.go_inval = inode_go_inval,
	.go_demote_ok = inode_go_demote_ok,
	.go_lock = inode_go_lock,
	.go_dump = inode_go_dump,
	.go_type = LM_TYPE_INODE,
	.go_flags = GLOF_ASPACE | GLOF_LRU | GLOF_LVB,
	.go_free = inode_go_free,
};

const struct gfs2_glock_operations gfs2_rgrp_glops = {
	.go_sync = rgrp_go_sync,
	.go_inval = rgrp_go_inval,
	.go_lock = gfs2_rgrp_go_lock,
	.go_dump = gfs2_rgrp_go_dump,
	.go_type = LM_TYPE_RGRP,
	.go_flags = GLOF_LVB,
};

const struct gfs2_glock_operations gfs2_freeze_glops = {
	.go_sync = freeze_go_sync,
	.go_xmote_bh = freeze_go_xmote_bh,
	.go_demote_ok = freeze_go_demote_ok,
	.go_type = LM_TYPE_NONDISK,
	.go_flags = GLOF_NONDISK,
};

const struct gfs2_glock_operations gfs2_iopen_glops = {
	.go_type = LM_TYPE_IOPEN,
	.go_callback = iopen_go_callback,
	.go_demote_ok = iopen_go_demote_ok,
	.go_flags = GLOF_LRU | GLOF_NONDISK,
	.go_subclass = 1,
};

const struct gfs2_glock_operations gfs2_flock_glops = {
	.go_type = LM_TYPE_FLOCK,
	.go_flags = GLOF_LRU | GLOF_NONDISK,
};

const struct gfs2_glock_operations gfs2_nondisk_glops = {
	.go_type = LM_TYPE_NONDISK,
	.go_flags = GLOF_NONDISK,
	.go_callback = nondisk_go_callback,
};

const struct gfs2_glock_operations gfs2_quota_glops = {
	.go_type = LM_TYPE_QUOTA,
	.go_flags = GLOF_LVB | GLOF_LRU | GLOF_NONDISK,
};

const struct gfs2_glock_operations gfs2_journal_glops = {
	.go_type = LM_TYPE_JOURNAL,
	.go_flags = GLOF_NONDISK,
};

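/* Map each LM_TYPE_* lock type to its operations table. */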
const struct gfs2_glock_operations *gfs2_glops_list[] = {
	[LM_TYPE_META] = &gfs2_meta_glops,
	[LM_TYPE_INODE] = &gfs2_inode_glops,
	[LM_TYPE_RGRP] = &gfs2_rgrp_glops,
	[LM_TYPE_IOPEN] = &gfs2_iopen_glops,
	[LM_TYPE_FLOCK] = &gfs2_flock_glops,
	[LM_TYPE_NONDISK] = &gfs2_nondisk_glops,
	[LM_TYPE_QUOTA] = &gfs2_quota_glops,
	[LM_TYPE_JOURNAL] = &gfs2_journal_glops,
};