^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1) // SPDX-License-Identifier: GPL-2.0-only
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2) /* -*- mode: c; c-basic-offset: 8; -*-
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3) * vim: noexpandtab sw=8 ts=8 sts=0:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5) * refcounttree.c
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7) * Copyright (C) 2009 Oracle. All rights reserved.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10) #include <linux/sort.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11) #include <cluster/masklog.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12) #include "ocfs2.h"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13) #include "inode.h"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14) #include "alloc.h"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15) #include "suballoc.h"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 16) #include "journal.h"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 17) #include "uptodate.h"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 18) #include "super.h"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 19) #include "buffer_head_io.h"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 20) #include "blockcheck.h"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 21) #include "refcounttree.h"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 22) #include "sysfile.h"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 23) #include "dlmglue.h"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 24) #include "extent_map.h"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 25) #include "aops.h"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 26) #include "xattr.h"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 27) #include "namei.h"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 28) #include "ocfs2_trace.h"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 29) #include "file.h"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 30)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 31) #include <linux/bio.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 32) #include <linux/blkdev.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 33) #include <linux/slab.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 34) #include <linux/writeback.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 35) #include <linux/pagevec.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 36) #include <linux/swap.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 37) #include <linux/security.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 38) #include <linux/fsnotify.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 39) #include <linux/quotaops.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 40) #include <linux/namei.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 41) #include <linux/mount.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 42) #include <linux/posix_acl.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 43)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 44) struct ocfs2_cow_context {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 45) struct inode *inode;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 46) u32 cow_start;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 47) u32 cow_len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 48) struct ocfs2_extent_tree data_et;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 49) struct ocfs2_refcount_tree *ref_tree;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 50) struct buffer_head *ref_root_bh;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 51) struct ocfs2_alloc_context *meta_ac;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 52) struct ocfs2_alloc_context *data_ac;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 53) struct ocfs2_cached_dealloc_ctxt dealloc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 54) void *cow_object;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 55) struct ocfs2_post_refcount *post_refcount;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 56) int extra_credits;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 57) int (*get_clusters)(struct ocfs2_cow_context *context,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 58) u32 v_cluster, u32 *p_cluster,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 59) u32 *num_clusters,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 60) unsigned int *extent_flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 61) int (*cow_duplicate_clusters)(handle_t *handle,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 62) struct inode *inode,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 63) u32 cpos, u32 old_cluster,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 64) u32 new_cluster, u32 new_len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 65) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 66)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 67) static inline struct ocfs2_refcount_tree *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 68) cache_info_to_refcount(struct ocfs2_caching_info *ci)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 69) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 70) return container_of(ci, struct ocfs2_refcount_tree, rf_ci);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 71) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 72)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 73) static int ocfs2_validate_refcount_block(struct super_block *sb,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 74) struct buffer_head *bh)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 75) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 76) int rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 77) struct ocfs2_refcount_block *rb =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 78) (struct ocfs2_refcount_block *)bh->b_data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 79)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 80) trace_ocfs2_validate_refcount_block((unsigned long long)bh->b_blocknr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 81)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 82) BUG_ON(!buffer_uptodate(bh));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 83)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 84) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 85) * If the ecc fails, we return the error but otherwise
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 86) * leave the filesystem running. We know any error is
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 87) * local to this block.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 88) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 89) rc = ocfs2_validate_meta_ecc(sb, bh->b_data, &rb->rf_check);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 90) if (rc) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 91) mlog(ML_ERROR, "Checksum failed for refcount block %llu\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 92) (unsigned long long)bh->b_blocknr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 93) return rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 94) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 95)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 96)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 97) if (!OCFS2_IS_VALID_REFCOUNT_BLOCK(rb)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 98) rc = ocfs2_error(sb,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 99) "Refcount block #%llu has bad signature %.*s\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 100) (unsigned long long)bh->b_blocknr, 7,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 101) rb->rf_signature);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 102) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 103) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 104)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 105) if (le64_to_cpu(rb->rf_blkno) != bh->b_blocknr) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 106) rc = ocfs2_error(sb,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 107) "Refcount block #%llu has an invalid rf_blkno of %llu\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 108) (unsigned long long)bh->b_blocknr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 109) (unsigned long long)le64_to_cpu(rb->rf_blkno));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 110) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 111) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 112)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 113) if (le32_to_cpu(rb->rf_fs_generation) != OCFS2_SB(sb)->fs_generation) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 114) rc = ocfs2_error(sb,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 115) "Refcount block #%llu has an invalid rf_fs_generation of #%u\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 116) (unsigned long long)bh->b_blocknr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 117) le32_to_cpu(rb->rf_fs_generation));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 118) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 119) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 120) out:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 121) return rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 122) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 123)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 124) static int ocfs2_read_refcount_block(struct ocfs2_caching_info *ci,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 125) u64 rb_blkno,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 126) struct buffer_head **bh)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 127) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 128) int rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 129) struct buffer_head *tmp = *bh;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 130)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 131) rc = ocfs2_read_block(ci, rb_blkno, &tmp,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 132) ocfs2_validate_refcount_block);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 133)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 134) /* If ocfs2_read_block() got us a new bh, pass it up. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 135) if (!rc && !*bh)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 136) *bh = tmp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 137)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 138) return rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 139) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 140)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 141) static u64 ocfs2_refcount_cache_owner(struct ocfs2_caching_info *ci)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 142) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 143) struct ocfs2_refcount_tree *rf = cache_info_to_refcount(ci);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 144)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 145) return rf->rf_blkno;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 146) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 147)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 148) static struct super_block *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 149) ocfs2_refcount_cache_get_super(struct ocfs2_caching_info *ci)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 150) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 151) struct ocfs2_refcount_tree *rf = cache_info_to_refcount(ci);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 152)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 153) return rf->rf_sb;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 154) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 155)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 156) static void ocfs2_refcount_cache_lock(struct ocfs2_caching_info *ci)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 157) __acquires(&rf->rf_lock)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 158) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 159) struct ocfs2_refcount_tree *rf = cache_info_to_refcount(ci);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 160)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 161) spin_lock(&rf->rf_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 162) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 163)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 164) static void ocfs2_refcount_cache_unlock(struct ocfs2_caching_info *ci)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 165) __releases(&rf->rf_lock)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 166) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 167) struct ocfs2_refcount_tree *rf = cache_info_to_refcount(ci);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 168)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 169) spin_unlock(&rf->rf_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 170) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 171)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 172) static void ocfs2_refcount_cache_io_lock(struct ocfs2_caching_info *ci)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 173) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 174) struct ocfs2_refcount_tree *rf = cache_info_to_refcount(ci);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 175)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 176) mutex_lock(&rf->rf_io_mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 177) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 178)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 179) static void ocfs2_refcount_cache_io_unlock(struct ocfs2_caching_info *ci)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 180) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 181) struct ocfs2_refcount_tree *rf = cache_info_to_refcount(ci);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 182)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 183) mutex_unlock(&rf->rf_io_mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 184) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 185)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 186) static const struct ocfs2_caching_operations ocfs2_refcount_caching_ops = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 187) .co_owner = ocfs2_refcount_cache_owner,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 188) .co_get_super = ocfs2_refcount_cache_get_super,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 189) .co_cache_lock = ocfs2_refcount_cache_lock,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 190) .co_cache_unlock = ocfs2_refcount_cache_unlock,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 191) .co_io_lock = ocfs2_refcount_cache_io_lock,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 192) .co_io_unlock = ocfs2_refcount_cache_io_unlock,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 193) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 194)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 195) static struct ocfs2_refcount_tree *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 196) ocfs2_find_refcount_tree(struct ocfs2_super *osb, u64 blkno)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 197) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 198) struct rb_node *n = osb->osb_rf_lock_tree.rb_node;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 199) struct ocfs2_refcount_tree *tree = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 200)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 201) while (n) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 202) tree = rb_entry(n, struct ocfs2_refcount_tree, rf_node);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 203)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 204) if (blkno < tree->rf_blkno)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 205) n = n->rb_left;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 206) else if (blkno > tree->rf_blkno)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 207) n = n->rb_right;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 208) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 209) return tree;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 210) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 211)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 212) return NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 213) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 214)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 215) /* osb_lock is already locked. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 216) static void ocfs2_insert_refcount_tree(struct ocfs2_super *osb,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 217) struct ocfs2_refcount_tree *new)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 218) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 219) u64 rf_blkno = new->rf_blkno;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 220) struct rb_node *parent = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 221) struct rb_node **p = &osb->osb_rf_lock_tree.rb_node;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 222) struct ocfs2_refcount_tree *tmp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 223)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 224) while (*p) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 225) parent = *p;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 226)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 227) tmp = rb_entry(parent, struct ocfs2_refcount_tree,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 228) rf_node);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 229)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 230) if (rf_blkno < tmp->rf_blkno)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 231) p = &(*p)->rb_left;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 232) else if (rf_blkno > tmp->rf_blkno)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 233) p = &(*p)->rb_right;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 234) else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 235) /* This should never happen! */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 236) mlog(ML_ERROR, "Duplicate refcount block %llu found!\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 237) (unsigned long long)rf_blkno);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 238) BUG();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 239) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 240) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 241)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 242) rb_link_node(&new->rf_node, parent, p);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 243) rb_insert_color(&new->rf_node, &osb->osb_rf_lock_tree);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 244) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 245)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 246) static void ocfs2_free_refcount_tree(struct ocfs2_refcount_tree *tree)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 247) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 248) ocfs2_metadata_cache_exit(&tree->rf_ci);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 249) ocfs2_simple_drop_lockres(OCFS2_SB(tree->rf_sb), &tree->rf_lockres);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 250) ocfs2_lock_res_free(&tree->rf_lockres);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 251) kfree(tree);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 252) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 253)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 254) static inline void
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 255) ocfs2_erase_refcount_tree_from_list_no_lock(struct ocfs2_super *osb,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 256) struct ocfs2_refcount_tree *tree)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 257) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 258) rb_erase(&tree->rf_node, &osb->osb_rf_lock_tree);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 259) if (osb->osb_ref_tree_lru && osb->osb_ref_tree_lru == tree)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 260) osb->osb_ref_tree_lru = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 261) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 262)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 263) static void ocfs2_erase_refcount_tree_from_list(struct ocfs2_super *osb,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 264) struct ocfs2_refcount_tree *tree)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 265) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 266) spin_lock(&osb->osb_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 267) ocfs2_erase_refcount_tree_from_list_no_lock(osb, tree);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 268) spin_unlock(&osb->osb_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 269) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 270)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 271) static void ocfs2_kref_remove_refcount_tree(struct kref *kref)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 272) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 273) struct ocfs2_refcount_tree *tree =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 274) container_of(kref, struct ocfs2_refcount_tree, rf_getcnt);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 275)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 276) ocfs2_free_refcount_tree(tree);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 277) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 278)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 279) static inline void
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 280) ocfs2_refcount_tree_get(struct ocfs2_refcount_tree *tree)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 281) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 282) kref_get(&tree->rf_getcnt);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 283) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 284)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 285) static inline void
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 286) ocfs2_refcount_tree_put(struct ocfs2_refcount_tree *tree)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 287) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 288) kref_put(&tree->rf_getcnt, ocfs2_kref_remove_refcount_tree);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 289) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 290)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 291) static inline void ocfs2_init_refcount_tree_ci(struct ocfs2_refcount_tree *new,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 292) struct super_block *sb)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 293) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 294) ocfs2_metadata_cache_init(&new->rf_ci, &ocfs2_refcount_caching_ops);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 295) mutex_init(&new->rf_io_mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 296) new->rf_sb = sb;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 297) spin_lock_init(&new->rf_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 298) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 299)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 300) static inline void ocfs2_init_refcount_tree_lock(struct ocfs2_super *osb,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 301) struct ocfs2_refcount_tree *new,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 302) u64 rf_blkno, u32 generation)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 303) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 304) init_rwsem(&new->rf_sem);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 305) ocfs2_refcount_lock_res_init(&new->rf_lockres, osb,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 306) rf_blkno, generation);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 307) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 308)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 309) static struct ocfs2_refcount_tree*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 310) ocfs2_allocate_refcount_tree(struct ocfs2_super *osb, u64 rf_blkno)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 311) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 312) struct ocfs2_refcount_tree *new;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 313)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 314) new = kzalloc(sizeof(struct ocfs2_refcount_tree), GFP_NOFS);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 315) if (!new)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 316) return NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 317)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 318) new->rf_blkno = rf_blkno;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 319) kref_init(&new->rf_getcnt);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 320) ocfs2_init_refcount_tree_ci(new, osb->sb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 321)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 322) return new;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 323) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 324)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 325) static int ocfs2_get_refcount_tree(struct ocfs2_super *osb, u64 rf_blkno,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 326) struct ocfs2_refcount_tree **ret_tree)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 327) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 328) int ret = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 329) struct ocfs2_refcount_tree *tree, *new = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 330) struct buffer_head *ref_root_bh = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 331) struct ocfs2_refcount_block *ref_rb;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 332)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 333) spin_lock(&osb->osb_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 334) if (osb->osb_ref_tree_lru &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 335) osb->osb_ref_tree_lru->rf_blkno == rf_blkno)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 336) tree = osb->osb_ref_tree_lru;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 337) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 338) tree = ocfs2_find_refcount_tree(osb, rf_blkno);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 339) if (tree)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 340) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 341)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 342) spin_unlock(&osb->osb_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 343)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 344) new = ocfs2_allocate_refcount_tree(osb, rf_blkno);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 345) if (!new) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 346) ret = -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 347) mlog_errno(ret);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 348) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 349) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 350) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 351) * We need the generation to create the refcount tree lock and since
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 352) * it isn't changed during the tree modification, we are safe here to
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 353) * read without protection.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 354) * We also have to purge the cache after we create the lock since the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 355) * refcount block may have the stale data. It can only be trusted when
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 356) * we hold the refcount lock.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 357) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 358) ret = ocfs2_read_refcount_block(&new->rf_ci, rf_blkno, &ref_root_bh);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 359) if (ret) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 360) mlog_errno(ret);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 361) ocfs2_metadata_cache_exit(&new->rf_ci);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 362) kfree(new);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 363) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 364) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 365)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 366) ref_rb = (struct ocfs2_refcount_block *)ref_root_bh->b_data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 367) new->rf_generation = le32_to_cpu(ref_rb->rf_generation);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 368) ocfs2_init_refcount_tree_lock(osb, new, rf_blkno,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 369) new->rf_generation);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 370) ocfs2_metadata_cache_purge(&new->rf_ci);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 371)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 372) spin_lock(&osb->osb_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 373) tree = ocfs2_find_refcount_tree(osb, rf_blkno);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 374) if (tree)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 375) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 376)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 377) ocfs2_insert_refcount_tree(osb, new);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 378)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 379) tree = new;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 380) new = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 381)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 382) out:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 383) *ret_tree = tree;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 384)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 385) osb->osb_ref_tree_lru = tree;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 386)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 387) spin_unlock(&osb->osb_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 388)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 389) if (new)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 390) ocfs2_free_refcount_tree(new);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 391)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 392) brelse(ref_root_bh);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 393) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 394) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 395)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 396) static int ocfs2_get_refcount_block(struct inode *inode, u64 *ref_blkno)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 397) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 398) int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 399) struct buffer_head *di_bh = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 400) struct ocfs2_dinode *di;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 401)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 402) ret = ocfs2_read_inode_block(inode, &di_bh);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 403) if (ret) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 404) mlog_errno(ret);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 405) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 406) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 407)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 408) BUG_ON(!ocfs2_is_refcount_inode(inode));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 409)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 410) di = (struct ocfs2_dinode *)di_bh->b_data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 411) *ref_blkno = le64_to_cpu(di->i_refcount_loc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 412) brelse(di_bh);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 413) out:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 414) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 415) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 416)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 417) static int __ocfs2_lock_refcount_tree(struct ocfs2_super *osb,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 418) struct ocfs2_refcount_tree *tree, int rw)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 419) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 420) int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 421)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 422) ret = ocfs2_refcount_lock(tree, rw);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 423) if (ret) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 424) mlog_errno(ret);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 425) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 426) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 427)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 428) if (rw)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 429) down_write(&tree->rf_sem);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 430) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 431) down_read(&tree->rf_sem);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 432)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 433) out:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 434) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 435) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 436)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 437) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 438) * Lock the refcount tree pointed by ref_blkno and return the tree.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 439) * In most case, we lock the tree and read the refcount block.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 440) * So read it here if the caller really needs it.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 441) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 442) * If the tree has been re-created by other node, it will free the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 443) * old one and re-create it.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 444) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 445) int ocfs2_lock_refcount_tree(struct ocfs2_super *osb,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 446) u64 ref_blkno, int rw,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 447) struct ocfs2_refcount_tree **ret_tree,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 448) struct buffer_head **ref_bh)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 449) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 450) int ret, delete_tree = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 451) struct ocfs2_refcount_tree *tree = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 452) struct buffer_head *ref_root_bh = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 453) struct ocfs2_refcount_block *rb;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 454)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 455) again:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 456) ret = ocfs2_get_refcount_tree(osb, ref_blkno, &tree);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 457) if (ret) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 458) mlog_errno(ret);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 459) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 460) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 461)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 462) ocfs2_refcount_tree_get(tree);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 463)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 464) ret = __ocfs2_lock_refcount_tree(osb, tree, rw);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 465) if (ret) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 466) mlog_errno(ret);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 467) ocfs2_refcount_tree_put(tree);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 468) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 469) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 470)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 471) ret = ocfs2_read_refcount_block(&tree->rf_ci, tree->rf_blkno,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 472) &ref_root_bh);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 473) if (ret) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 474) mlog_errno(ret);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 475) ocfs2_unlock_refcount_tree(osb, tree, rw);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 476) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 477) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 478)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 479) rb = (struct ocfs2_refcount_block *)ref_root_bh->b_data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 480) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 481) * If the refcount block has been freed and re-created, we may need
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 482) * to recreate the refcount tree also.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 483) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 484) * Here we just remove the tree from the rb-tree, and the last
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 485) * kref holder will unlock and delete this refcount_tree.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 486) * Then we goto "again" and ocfs2_get_refcount_tree will create
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 487) * the new refcount tree for us.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 488) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 489) if (tree->rf_generation != le32_to_cpu(rb->rf_generation)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 490) if (!tree->rf_removed) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 491) ocfs2_erase_refcount_tree_from_list(osb, tree);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 492) tree->rf_removed = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 493) delete_tree = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 494) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 495)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 496) ocfs2_unlock_refcount_tree(osb, tree, rw);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 497) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 498) * We get an extra reference when we create the refcount
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 499) * tree, so another put will destroy it.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 500) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 501) if (delete_tree)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 502) ocfs2_refcount_tree_put(tree);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 503) brelse(ref_root_bh);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 504) ref_root_bh = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 505) goto again;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 506) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 507)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 508) *ret_tree = tree;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 509) if (ref_bh) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 510) *ref_bh = ref_root_bh;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 511) ref_root_bh = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 512) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 513) out:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 514) brelse(ref_root_bh);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 515) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 516) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 517)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 518) void ocfs2_unlock_refcount_tree(struct ocfs2_super *osb,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 519) struct ocfs2_refcount_tree *tree, int rw)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 520) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 521) if (rw)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 522) up_write(&tree->rf_sem);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 523) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 524) up_read(&tree->rf_sem);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 525)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 526) ocfs2_refcount_unlock(tree, rw);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 527) ocfs2_refcount_tree_put(tree);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 528) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 529)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 530) void ocfs2_purge_refcount_trees(struct ocfs2_super *osb)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 531) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 532) struct rb_node *node;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 533) struct ocfs2_refcount_tree *tree;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 534) struct rb_root *root = &osb->osb_rf_lock_tree;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 535)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 536) while ((node = rb_last(root)) != NULL) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 537) tree = rb_entry(node, struct ocfs2_refcount_tree, rf_node);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 538)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 539) trace_ocfs2_purge_refcount_trees(
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 540) (unsigned long long) tree->rf_blkno);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 541)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 542) rb_erase(&tree->rf_node, root);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 543) ocfs2_free_refcount_tree(tree);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 544) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 545) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 546)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 547) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 548) * Create a refcount tree for an inode.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 549) * We take for granted that the inode is already locked.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 550) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 551) static int ocfs2_create_refcount_tree(struct inode *inode,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 552) struct buffer_head *di_bh)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 553) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 554) int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 555) handle_t *handle = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 556) struct ocfs2_alloc_context *meta_ac = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 557) struct ocfs2_dinode *di = (struct ocfs2_dinode *)di_bh->b_data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 558) struct ocfs2_inode_info *oi = OCFS2_I(inode);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 559) struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 560) struct buffer_head *new_bh = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 561) struct ocfs2_refcount_block *rb;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 562) struct ocfs2_refcount_tree *new_tree = NULL, *tree = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 563) u16 suballoc_bit_start;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 564) u32 num_got;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 565) u64 suballoc_loc, first_blkno;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 566)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 567) BUG_ON(ocfs2_is_refcount_inode(inode));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 568)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 569) trace_ocfs2_create_refcount_tree(
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 570) (unsigned long long)oi->ip_blkno);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 571)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 572) ret = ocfs2_reserve_new_metadata_blocks(osb, 1, &meta_ac);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 573) if (ret) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 574) mlog_errno(ret);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 575) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 576) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 577)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 578) handle = ocfs2_start_trans(osb, OCFS2_REFCOUNT_TREE_CREATE_CREDITS);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 579) if (IS_ERR(handle)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 580) ret = PTR_ERR(handle);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 581) mlog_errno(ret);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 582) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 583) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 584)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 585) ret = ocfs2_journal_access_di(handle, INODE_CACHE(inode), di_bh,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 586) OCFS2_JOURNAL_ACCESS_WRITE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 587) if (ret) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 588) mlog_errno(ret);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 589) goto out_commit;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 590) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 591)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 592) ret = ocfs2_claim_metadata(handle, meta_ac, 1, &suballoc_loc,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 593) &suballoc_bit_start, &num_got,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 594) &first_blkno);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 595) if (ret) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 596) mlog_errno(ret);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 597) goto out_commit;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 598) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 599)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 600) new_tree = ocfs2_allocate_refcount_tree(osb, first_blkno);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 601) if (!new_tree) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 602) ret = -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 603) mlog_errno(ret);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 604) goto out_commit;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 605) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 606)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 607) new_bh = sb_getblk(inode->i_sb, first_blkno);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 608) if (!new_bh) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 609) ret = -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 610) mlog_errno(ret);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 611) goto out_commit;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 612) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 613) ocfs2_set_new_buffer_uptodate(&new_tree->rf_ci, new_bh);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 614)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 615) ret = ocfs2_journal_access_rb(handle, &new_tree->rf_ci, new_bh,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 616) OCFS2_JOURNAL_ACCESS_CREATE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 617) if (ret) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 618) mlog_errno(ret);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 619) goto out_commit;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 620) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 621)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 622) /* Initialize ocfs2_refcount_block. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 623) rb = (struct ocfs2_refcount_block *)new_bh->b_data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 624) memset(rb, 0, inode->i_sb->s_blocksize);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 625) strcpy((void *)rb, OCFS2_REFCOUNT_BLOCK_SIGNATURE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 626) rb->rf_suballoc_slot = cpu_to_le16(meta_ac->ac_alloc_slot);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 627) rb->rf_suballoc_loc = cpu_to_le64(suballoc_loc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 628) rb->rf_suballoc_bit = cpu_to_le16(suballoc_bit_start);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 629) rb->rf_fs_generation = cpu_to_le32(osb->fs_generation);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 630) rb->rf_blkno = cpu_to_le64(first_blkno);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 631) rb->rf_count = cpu_to_le32(1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 632) rb->rf_records.rl_count =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 633) cpu_to_le16(ocfs2_refcount_recs_per_rb(osb->sb));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 634) spin_lock(&osb->osb_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 635) rb->rf_generation = osb->s_next_generation++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 636) spin_unlock(&osb->osb_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 637)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 638) ocfs2_journal_dirty(handle, new_bh);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 639)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 640) spin_lock(&oi->ip_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 641) oi->ip_dyn_features |= OCFS2_HAS_REFCOUNT_FL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 642) di->i_dyn_features = cpu_to_le16(oi->ip_dyn_features);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 643) di->i_refcount_loc = cpu_to_le64(first_blkno);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 644) spin_unlock(&oi->ip_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 645)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 646) trace_ocfs2_create_refcount_tree_blkno((unsigned long long)first_blkno);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 647)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 648) ocfs2_journal_dirty(handle, di_bh);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 649)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 650) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 651) * We have to init the tree lock here since it will use
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 652) * the generation number to create it.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 653) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 654) new_tree->rf_generation = le32_to_cpu(rb->rf_generation);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 655) ocfs2_init_refcount_tree_lock(osb, new_tree, first_blkno,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 656) new_tree->rf_generation);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 657)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 658) spin_lock(&osb->osb_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 659) tree = ocfs2_find_refcount_tree(osb, first_blkno);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 660)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 661) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 662) * We've just created a new refcount tree in this block. If
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 663) * we found a refcount tree on the ocfs2_super, it must be
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 664) * one we just deleted. We free the old tree before
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 665) * inserting the new tree.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 666) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 667) BUG_ON(tree && tree->rf_generation == new_tree->rf_generation);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 668) if (tree)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 669) ocfs2_erase_refcount_tree_from_list_no_lock(osb, tree);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 670) ocfs2_insert_refcount_tree(osb, new_tree);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 671) spin_unlock(&osb->osb_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 672) new_tree = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 673) if (tree)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 674) ocfs2_refcount_tree_put(tree);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 675)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 676) out_commit:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 677) ocfs2_commit_trans(osb, handle);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 678)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 679) out:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 680) if (new_tree) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 681) ocfs2_metadata_cache_exit(&new_tree->rf_ci);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 682) kfree(new_tree);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 683) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 684)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 685) brelse(new_bh);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 686) if (meta_ac)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 687) ocfs2_free_alloc_context(meta_ac);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 688)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 689) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 690) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 691)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 692) static int ocfs2_set_refcount_tree(struct inode *inode,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 693) struct buffer_head *di_bh,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 694) u64 refcount_loc)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 695) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 696) int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 697) handle_t *handle = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 698) struct ocfs2_dinode *di = (struct ocfs2_dinode *)di_bh->b_data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 699) struct ocfs2_inode_info *oi = OCFS2_I(inode);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 700) struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 701) struct buffer_head *ref_root_bh = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 702) struct ocfs2_refcount_block *rb;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 703) struct ocfs2_refcount_tree *ref_tree;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 704)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 705) BUG_ON(ocfs2_is_refcount_inode(inode));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 706)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 707) ret = ocfs2_lock_refcount_tree(osb, refcount_loc, 1,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 708) &ref_tree, &ref_root_bh);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 709) if (ret) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 710) mlog_errno(ret);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 711) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 712) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 713)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 714) handle = ocfs2_start_trans(osb, OCFS2_REFCOUNT_TREE_SET_CREDITS);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 715) if (IS_ERR(handle)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 716) ret = PTR_ERR(handle);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 717) mlog_errno(ret);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 718) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 719) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 720)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 721) ret = ocfs2_journal_access_di(handle, INODE_CACHE(inode), di_bh,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 722) OCFS2_JOURNAL_ACCESS_WRITE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 723) if (ret) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 724) mlog_errno(ret);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 725) goto out_commit;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 726) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 727)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 728) ret = ocfs2_journal_access_rb(handle, &ref_tree->rf_ci, ref_root_bh,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 729) OCFS2_JOURNAL_ACCESS_WRITE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 730) if (ret) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 731) mlog_errno(ret);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 732) goto out_commit;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 733) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 734)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 735) rb = (struct ocfs2_refcount_block *)ref_root_bh->b_data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 736) le32_add_cpu(&rb->rf_count, 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 737)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 738) ocfs2_journal_dirty(handle, ref_root_bh);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 739)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 740) spin_lock(&oi->ip_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 741) oi->ip_dyn_features |= OCFS2_HAS_REFCOUNT_FL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 742) di->i_dyn_features = cpu_to_le16(oi->ip_dyn_features);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 743) di->i_refcount_loc = cpu_to_le64(refcount_loc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 744) spin_unlock(&oi->ip_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 745) ocfs2_journal_dirty(handle, di_bh);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 746)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 747) out_commit:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 748) ocfs2_commit_trans(osb, handle);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 749) out:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 750) ocfs2_unlock_refcount_tree(osb, ref_tree, 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 751) brelse(ref_root_bh);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 752)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 753) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 754) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 755)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 756) int ocfs2_remove_refcount_tree(struct inode *inode, struct buffer_head *di_bh)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 757) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 758) int ret, delete_tree = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 759) handle_t *handle = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 760) struct ocfs2_dinode *di = (struct ocfs2_dinode *)di_bh->b_data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 761) struct ocfs2_inode_info *oi = OCFS2_I(inode);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 762) struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 763) struct ocfs2_refcount_block *rb;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 764) struct inode *alloc_inode = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 765) struct buffer_head *alloc_bh = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 766) struct buffer_head *blk_bh = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 767) struct ocfs2_refcount_tree *ref_tree;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 768) int credits = OCFS2_REFCOUNT_TREE_REMOVE_CREDITS;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 769) u64 blk = 0, bg_blkno = 0, ref_blkno = le64_to_cpu(di->i_refcount_loc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 770) u16 bit = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 771)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 772) if (!ocfs2_is_refcount_inode(inode))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 773) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 774)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 775) BUG_ON(!ref_blkno);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 776) ret = ocfs2_lock_refcount_tree(osb, ref_blkno, 1, &ref_tree, &blk_bh);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 777) if (ret) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 778) mlog_errno(ret);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 779) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 780) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 781)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 782) rb = (struct ocfs2_refcount_block *)blk_bh->b_data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 783)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 784) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 785) * If we are the last user, we need to free the block.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 786) * So lock the allocator ahead.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 787) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 788) if (le32_to_cpu(rb->rf_count) == 1) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 789) blk = le64_to_cpu(rb->rf_blkno);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 790) bit = le16_to_cpu(rb->rf_suballoc_bit);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 791) if (rb->rf_suballoc_loc)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 792) bg_blkno = le64_to_cpu(rb->rf_suballoc_loc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 793) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 794) bg_blkno = ocfs2_which_suballoc_group(blk, bit);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 795)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 796) alloc_inode = ocfs2_get_system_file_inode(osb,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 797) EXTENT_ALLOC_SYSTEM_INODE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 798) le16_to_cpu(rb->rf_suballoc_slot));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 799) if (!alloc_inode) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 800) ret = -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 801) mlog_errno(ret);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 802) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 803) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 804) inode_lock(alloc_inode);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 805)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 806) ret = ocfs2_inode_lock(alloc_inode, &alloc_bh, 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 807) if (ret) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 808) mlog_errno(ret);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 809) goto out_mutex;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 810) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 811)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 812) credits += OCFS2_SUBALLOC_FREE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 813) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 814)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 815) handle = ocfs2_start_trans(osb, credits);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 816) if (IS_ERR(handle)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 817) ret = PTR_ERR(handle);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 818) mlog_errno(ret);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 819) goto out_unlock;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 820) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 821)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 822) ret = ocfs2_journal_access_di(handle, INODE_CACHE(inode), di_bh,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 823) OCFS2_JOURNAL_ACCESS_WRITE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 824) if (ret) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 825) mlog_errno(ret);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 826) goto out_commit;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 827) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 828)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 829) ret = ocfs2_journal_access_rb(handle, &ref_tree->rf_ci, blk_bh,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 830) OCFS2_JOURNAL_ACCESS_WRITE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 831) if (ret) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 832) mlog_errno(ret);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 833) goto out_commit;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 834) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 835)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 836) spin_lock(&oi->ip_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 837) oi->ip_dyn_features &= ~OCFS2_HAS_REFCOUNT_FL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 838) di->i_dyn_features = cpu_to_le16(oi->ip_dyn_features);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 839) di->i_refcount_loc = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 840) spin_unlock(&oi->ip_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 841) ocfs2_journal_dirty(handle, di_bh);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 842)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 843) le32_add_cpu(&rb->rf_count , -1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 844) ocfs2_journal_dirty(handle, blk_bh);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 845)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 846) if (!rb->rf_count) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 847) delete_tree = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 848) ocfs2_erase_refcount_tree_from_list(osb, ref_tree);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 849) ret = ocfs2_free_suballoc_bits(handle, alloc_inode,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 850) alloc_bh, bit, bg_blkno, 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 851) if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 852) mlog_errno(ret);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 853) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 854)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 855) out_commit:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 856) ocfs2_commit_trans(osb, handle);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 857) out_unlock:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 858) if (alloc_inode) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 859) ocfs2_inode_unlock(alloc_inode, 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 860) brelse(alloc_bh);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 861) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 862) out_mutex:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 863) if (alloc_inode) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 864) inode_unlock(alloc_inode);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 865) iput(alloc_inode);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 866) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 867) out:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 868) ocfs2_unlock_refcount_tree(osb, ref_tree, 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 869) if (delete_tree)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 870) ocfs2_refcount_tree_put(ref_tree);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 871) brelse(blk_bh);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 872)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 873) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 874) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 875)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 876) static void ocfs2_find_refcount_rec_in_rl(struct ocfs2_caching_info *ci,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 877) struct buffer_head *ref_leaf_bh,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 878) u64 cpos, unsigned int len,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 879) struct ocfs2_refcount_rec *ret_rec,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 880) int *index)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 881) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 882) int i = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 883) struct ocfs2_refcount_block *rb =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 884) (struct ocfs2_refcount_block *)ref_leaf_bh->b_data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 885) struct ocfs2_refcount_rec *rec = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 886)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 887) for (; i < le16_to_cpu(rb->rf_records.rl_used); i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 888) rec = &rb->rf_records.rl_recs[i];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 889)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 890) if (le64_to_cpu(rec->r_cpos) +
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 891) le32_to_cpu(rec->r_clusters) <= cpos)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 892) continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 893) else if (le64_to_cpu(rec->r_cpos) > cpos)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 894) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 895)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 896) /* ok, cpos fail in this rec. Just return. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 897) if (ret_rec)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 898) *ret_rec = *rec;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 899) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 900) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 901)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 902) if (ret_rec) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 903) /* We meet with a hole here, so fake the rec. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 904) ret_rec->r_cpos = cpu_to_le64(cpos);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 905) ret_rec->r_refcount = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 906) if (i < le16_to_cpu(rb->rf_records.rl_used) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 907) le64_to_cpu(rec->r_cpos) < cpos + len)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 908) ret_rec->r_clusters =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 909) cpu_to_le32(le64_to_cpu(rec->r_cpos) - cpos);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 910) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 911) ret_rec->r_clusters = cpu_to_le32(len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 912) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 913)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 914) out:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 915) *index = i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 916) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 917)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 918) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 919) * Try to remove refcount tree. The mechanism is:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 920) * 1) Check whether i_clusters == 0, if no, exit.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 921) * 2) check whether we have i_xattr_loc in dinode. if yes, exit.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 922) * 3) Check whether we have inline xattr stored outside, if yes, exit.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 923) * 4) Remove the tree.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 924) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 925) int ocfs2_try_remove_refcount_tree(struct inode *inode,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 926) struct buffer_head *di_bh)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 927) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 928) int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 929) struct ocfs2_inode_info *oi = OCFS2_I(inode);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 930) struct ocfs2_dinode *di = (struct ocfs2_dinode *)di_bh->b_data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 931)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 932) down_write(&oi->ip_xattr_sem);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 933) down_write(&oi->ip_alloc_sem);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 934)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 935) if (oi->ip_clusters)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 936) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 937)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 938) if ((oi->ip_dyn_features & OCFS2_HAS_XATTR_FL) && di->i_xattr_loc)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 939) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 940)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 941) if (oi->ip_dyn_features & OCFS2_INLINE_XATTR_FL &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 942) ocfs2_has_inline_xattr_value_outside(inode, di))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 943) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 944)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 945) ret = ocfs2_remove_refcount_tree(inode, di_bh);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 946) if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 947) mlog_errno(ret);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 948) out:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 949) up_write(&oi->ip_alloc_sem);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 950) up_write(&oi->ip_xattr_sem);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 951) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 952) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 953)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 954) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 955) * Find the end range for a leaf refcount block indicated by
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 956) * el->l_recs[index].e_blkno.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 957) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 958) static int ocfs2_get_refcount_cpos_end(struct ocfs2_caching_info *ci,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 959) struct buffer_head *ref_root_bh,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 960) struct ocfs2_extent_block *eb,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 961) struct ocfs2_extent_list *el,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 962) int index, u32 *cpos_end)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 963) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 964) int ret, i, subtree_root;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 965) u32 cpos;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 966) u64 blkno;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 967) struct super_block *sb = ocfs2_metadata_cache_get_super(ci);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 968) struct ocfs2_path *left_path = NULL, *right_path = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 969) struct ocfs2_extent_tree et;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 970) struct ocfs2_extent_list *tmp_el;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 971)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 972) if (index < le16_to_cpu(el->l_next_free_rec) - 1) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 973) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 974) * We have a extent rec after index, so just use the e_cpos
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 975) * of the next extent rec.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 976) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 977) *cpos_end = le32_to_cpu(el->l_recs[index+1].e_cpos);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 978) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 979) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 980)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 981) if (!eb || (eb && !eb->h_next_leaf_blk)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 982) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 983) * We are the last extent rec, so any high cpos should
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 984) * be stored in this leaf refcount block.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 985) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 986) *cpos_end = UINT_MAX;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 987) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 988) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 989)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 990) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 991) * If the extent block isn't the last one, we have to find
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 992) * the subtree root between this extent block and the next
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 993) * leaf extent block and get the corresponding e_cpos from
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 994) * the subroot. Otherwise we may corrupt the b-tree.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 995) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 996) ocfs2_init_refcount_extent_tree(&et, ci, ref_root_bh);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 997)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 998) left_path = ocfs2_new_path_from_et(&et);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 999) if (!left_path) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1000) ret = -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1001) mlog_errno(ret);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1002) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1003) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1004)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1005) cpos = le32_to_cpu(eb->h_list.l_recs[index].e_cpos);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1006) ret = ocfs2_find_path(ci, left_path, cpos);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1007) if (ret) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1008) mlog_errno(ret);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1009) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1010) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1011)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1012) right_path = ocfs2_new_path_from_path(left_path);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1013) if (!right_path) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1014) ret = -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1015) mlog_errno(ret);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1016) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1017) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1018)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1019) ret = ocfs2_find_cpos_for_right_leaf(sb, left_path, &cpos);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1020) if (ret) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1021) mlog_errno(ret);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1022) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1023) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1024)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1025) ret = ocfs2_find_path(ci, right_path, cpos);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1026) if (ret) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1027) mlog_errno(ret);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1028) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1029) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1030)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1031) subtree_root = ocfs2_find_subtree_root(&et, left_path,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1032) right_path);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1033)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1034) tmp_el = left_path->p_node[subtree_root].el;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1035) blkno = left_path->p_node[subtree_root+1].bh->b_blocknr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1036) for (i = 0; i < le16_to_cpu(tmp_el->l_next_free_rec); i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1037) if (le64_to_cpu(tmp_el->l_recs[i].e_blkno) == blkno) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1038) *cpos_end = le32_to_cpu(tmp_el->l_recs[i+1].e_cpos);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1039) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1040) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1041) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1042)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1043) BUG_ON(i == le16_to_cpu(tmp_el->l_next_free_rec));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1044)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1045) out:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1046) ocfs2_free_path(left_path);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1047) ocfs2_free_path(right_path);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1048) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1049) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1050)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1051) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1052) * Given a cpos and len, try to find the refcount record which contains cpos.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1053) * 1. If cpos can be found in one refcount record, return the record.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1054) * 2. If cpos can't be found, return a fake record which start from cpos
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1055) * and end at a small value between cpos+len and start of the next record.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1056) * This fake record has r_refcount = 0.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1057) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1058) static int ocfs2_get_refcount_rec(struct ocfs2_caching_info *ci,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1059) struct buffer_head *ref_root_bh,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1060) u64 cpos, unsigned int len,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1061) struct ocfs2_refcount_rec *ret_rec,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1062) int *index,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1063) struct buffer_head **ret_bh)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1064) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1065) int ret = 0, i, found;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1066) u32 low_cpos, cpos_end;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1067) struct ocfs2_extent_list *el;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1068) struct ocfs2_extent_rec *rec = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1069) struct ocfs2_extent_block *eb = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1070) struct buffer_head *eb_bh = NULL, *ref_leaf_bh = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1071) struct super_block *sb = ocfs2_metadata_cache_get_super(ci);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1072) struct ocfs2_refcount_block *rb =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1073) (struct ocfs2_refcount_block *)ref_root_bh->b_data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1074)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1075) if (!(le32_to_cpu(rb->rf_flags) & OCFS2_REFCOUNT_TREE_FL)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1076) ocfs2_find_refcount_rec_in_rl(ci, ref_root_bh, cpos, len,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1077) ret_rec, index);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1078) *ret_bh = ref_root_bh;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1079) get_bh(ref_root_bh);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1080) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1081) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1082)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1083) el = &rb->rf_list;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1084) low_cpos = cpos & OCFS2_32BIT_POS_MASK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1085)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1086) if (el->l_tree_depth) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1087) ret = ocfs2_find_leaf(ci, el, low_cpos, &eb_bh);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1088) if (ret) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1089) mlog_errno(ret);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1090) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1091) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1092)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1093) eb = (struct ocfs2_extent_block *) eb_bh->b_data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1094) el = &eb->h_list;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1095)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1096) if (el->l_tree_depth) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1097) ret = ocfs2_error(sb,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1098) "refcount tree %llu has non zero tree depth in leaf btree tree block %llu\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1099) (unsigned long long)ocfs2_metadata_cache_owner(ci),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1100) (unsigned long long)eb_bh->b_blocknr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1101) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1102) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1103) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1104)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1105) found = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1106) for (i = le16_to_cpu(el->l_next_free_rec) - 1; i >= 0; i--) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1107) rec = &el->l_recs[i];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1108)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1109) if (le32_to_cpu(rec->e_cpos) <= low_cpos) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1110) found = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1111) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1112) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1113) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1114)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1115) if (found) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1116) ret = ocfs2_get_refcount_cpos_end(ci, ref_root_bh,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1117) eb, el, i, &cpos_end);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1118) if (ret) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1119) mlog_errno(ret);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1120) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1121) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1122)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1123) if (cpos_end < low_cpos + len)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1124) len = cpos_end - low_cpos;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1125) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1126)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1127) ret = ocfs2_read_refcount_block(ci, le64_to_cpu(rec->e_blkno),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1128) &ref_leaf_bh);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1129) if (ret) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1130) mlog_errno(ret);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1131) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1132) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1133)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1134) ocfs2_find_refcount_rec_in_rl(ci, ref_leaf_bh, cpos, len,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1135) ret_rec, index);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1136) *ret_bh = ref_leaf_bh;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1137) out:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1138) brelse(eb_bh);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1139) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1140) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1141)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1142) enum ocfs2_ref_rec_contig {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1143) REF_CONTIG_NONE = 0,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1144) REF_CONTIG_LEFT,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1145) REF_CONTIG_RIGHT,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1146) REF_CONTIG_LEFTRIGHT,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1147) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1148)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1149) static enum ocfs2_ref_rec_contig
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1150) ocfs2_refcount_rec_adjacent(struct ocfs2_refcount_block *rb,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1151) int index)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1152) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1153) if ((rb->rf_records.rl_recs[index].r_refcount ==
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1154) rb->rf_records.rl_recs[index + 1].r_refcount) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1155) (le64_to_cpu(rb->rf_records.rl_recs[index].r_cpos) +
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1156) le32_to_cpu(rb->rf_records.rl_recs[index].r_clusters) ==
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1157) le64_to_cpu(rb->rf_records.rl_recs[index + 1].r_cpos)))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1158) return REF_CONTIG_RIGHT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1159)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1160) return REF_CONTIG_NONE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1161) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1162)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1163) static enum ocfs2_ref_rec_contig
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1164) ocfs2_refcount_rec_contig(struct ocfs2_refcount_block *rb,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1165) int index)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1166) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1167) enum ocfs2_ref_rec_contig ret = REF_CONTIG_NONE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1168)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1169) if (index < le16_to_cpu(rb->rf_records.rl_used) - 1)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1170) ret = ocfs2_refcount_rec_adjacent(rb, index);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1171)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1172) if (index > 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1173) enum ocfs2_ref_rec_contig tmp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1174)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1175) tmp = ocfs2_refcount_rec_adjacent(rb, index - 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1176)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1177) if (tmp == REF_CONTIG_RIGHT) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1178) if (ret == REF_CONTIG_RIGHT)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1179) ret = REF_CONTIG_LEFTRIGHT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1180) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1181) ret = REF_CONTIG_LEFT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1182) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1183) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1184)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1185) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1186) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1187)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1188) static void ocfs2_rotate_refcount_rec_left(struct ocfs2_refcount_block *rb,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1189) int index)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1190) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1191) BUG_ON(rb->rf_records.rl_recs[index].r_refcount !=
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1192) rb->rf_records.rl_recs[index+1].r_refcount);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1193)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1194) le32_add_cpu(&rb->rf_records.rl_recs[index].r_clusters,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1195) le32_to_cpu(rb->rf_records.rl_recs[index+1].r_clusters));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1196)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1197) if (index < le16_to_cpu(rb->rf_records.rl_used) - 2)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1198) memmove(&rb->rf_records.rl_recs[index + 1],
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1199) &rb->rf_records.rl_recs[index + 2],
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1200) sizeof(struct ocfs2_refcount_rec) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1201) (le16_to_cpu(rb->rf_records.rl_used) - index - 2));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1202)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1203) memset(&rb->rf_records.rl_recs[le16_to_cpu(rb->rf_records.rl_used) - 1],
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1204) 0, sizeof(struct ocfs2_refcount_rec));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1205) le16_add_cpu(&rb->rf_records.rl_used, -1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1206) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1207)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1208) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1209) * Merge the refcount rec if we are contiguous with the adjacent recs.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1210) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1211) static void ocfs2_refcount_rec_merge(struct ocfs2_refcount_block *rb,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1212) int index)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1213) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1214) enum ocfs2_ref_rec_contig contig =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1215) ocfs2_refcount_rec_contig(rb, index);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1216)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1217) if (contig == REF_CONTIG_NONE)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1218) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1219)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1220) if (contig == REF_CONTIG_LEFT || contig == REF_CONTIG_LEFTRIGHT) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1221) BUG_ON(index == 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1222) index--;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1223) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1224)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1225) ocfs2_rotate_refcount_rec_left(rb, index);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1226)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1227) if (contig == REF_CONTIG_LEFTRIGHT)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1228) ocfs2_rotate_refcount_rec_left(rb, index);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1229) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1230)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1231) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1232) * Change the refcount indexed by "index" in ref_bh.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1233) * If refcount reaches 0, remove it.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1234) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1235) static int ocfs2_change_refcount_rec(handle_t *handle,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1236) struct ocfs2_caching_info *ci,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1237) struct buffer_head *ref_leaf_bh,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1238) int index, int merge, int change)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1239) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1240) int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1241) struct ocfs2_refcount_block *rb =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1242) (struct ocfs2_refcount_block *)ref_leaf_bh->b_data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1243) struct ocfs2_refcount_list *rl = &rb->rf_records;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1244) struct ocfs2_refcount_rec *rec = &rl->rl_recs[index];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1245)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1246) ret = ocfs2_journal_access_rb(handle, ci, ref_leaf_bh,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1247) OCFS2_JOURNAL_ACCESS_WRITE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1248) if (ret) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1249) mlog_errno(ret);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1250) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1251) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1252)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1253) trace_ocfs2_change_refcount_rec(
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1254) (unsigned long long)ocfs2_metadata_cache_owner(ci),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1255) index, le32_to_cpu(rec->r_refcount), change);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1256) le32_add_cpu(&rec->r_refcount, change);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1257)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1258) if (!rec->r_refcount) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1259) if (index != le16_to_cpu(rl->rl_used) - 1) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1260) memmove(rec, rec + 1,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1261) (le16_to_cpu(rl->rl_used) - index - 1) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1262) sizeof(struct ocfs2_refcount_rec));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1263) memset(&rl->rl_recs[le16_to_cpu(rl->rl_used) - 1],
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1264) 0, sizeof(struct ocfs2_refcount_rec));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1265) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1266)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1267) le16_add_cpu(&rl->rl_used, -1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1268) } else if (merge)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1269) ocfs2_refcount_rec_merge(rb, index);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1270)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1271) ocfs2_journal_dirty(handle, ref_leaf_bh);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1272) out:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1273) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1274) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1275)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1276) static int ocfs2_expand_inline_ref_root(handle_t *handle,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1277) struct ocfs2_caching_info *ci,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1278) struct buffer_head *ref_root_bh,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1279) struct buffer_head **ref_leaf_bh,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1280) struct ocfs2_alloc_context *meta_ac)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1281) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1282) int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1283) u16 suballoc_bit_start;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1284) u32 num_got;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1285) u64 suballoc_loc, blkno;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1286) struct super_block *sb = ocfs2_metadata_cache_get_super(ci);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1287) struct buffer_head *new_bh = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1288) struct ocfs2_refcount_block *new_rb;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1289) struct ocfs2_refcount_block *root_rb =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1290) (struct ocfs2_refcount_block *)ref_root_bh->b_data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1291)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1292) ret = ocfs2_journal_access_rb(handle, ci, ref_root_bh,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1293) OCFS2_JOURNAL_ACCESS_WRITE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1294) if (ret) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1295) mlog_errno(ret);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1296) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1297) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1298)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1299) ret = ocfs2_claim_metadata(handle, meta_ac, 1, &suballoc_loc,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1300) &suballoc_bit_start, &num_got,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1301) &blkno);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1302) if (ret) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1303) mlog_errno(ret);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1304) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1305) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1306)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1307) new_bh = sb_getblk(sb, blkno);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1308) if (new_bh == NULL) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1309) ret = -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1310) mlog_errno(ret);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1311) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1312) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1313) ocfs2_set_new_buffer_uptodate(ci, new_bh);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1314)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1315) ret = ocfs2_journal_access_rb(handle, ci, new_bh,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1316) OCFS2_JOURNAL_ACCESS_CREATE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1317) if (ret) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1318) mlog_errno(ret);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1319) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1320) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1321)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1322) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1323) * Initialize ocfs2_refcount_block.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1324) * It should contain the same information as the old root.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1325) * so just memcpy it and change the corresponding field.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1326) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1327) memcpy(new_bh->b_data, ref_root_bh->b_data, sb->s_blocksize);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1328)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1329) new_rb = (struct ocfs2_refcount_block *)new_bh->b_data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1330) new_rb->rf_suballoc_slot = cpu_to_le16(meta_ac->ac_alloc_slot);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1331) new_rb->rf_suballoc_loc = cpu_to_le64(suballoc_loc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1332) new_rb->rf_suballoc_bit = cpu_to_le16(suballoc_bit_start);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1333) new_rb->rf_blkno = cpu_to_le64(blkno);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1334) new_rb->rf_cpos = cpu_to_le32(0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1335) new_rb->rf_parent = cpu_to_le64(ref_root_bh->b_blocknr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1336) new_rb->rf_flags = cpu_to_le32(OCFS2_REFCOUNT_LEAF_FL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1337) ocfs2_journal_dirty(handle, new_bh);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1338)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1339) /* Now change the root. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1340) memset(&root_rb->rf_list, 0, sb->s_blocksize -
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1341) offsetof(struct ocfs2_refcount_block, rf_list));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1342) root_rb->rf_list.l_count = cpu_to_le16(ocfs2_extent_recs_per_rb(sb));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1343) root_rb->rf_clusters = cpu_to_le32(1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1344) root_rb->rf_list.l_next_free_rec = cpu_to_le16(1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1345) root_rb->rf_list.l_recs[0].e_blkno = cpu_to_le64(blkno);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1346) root_rb->rf_list.l_recs[0].e_leaf_clusters = cpu_to_le16(1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1347) root_rb->rf_flags = cpu_to_le32(OCFS2_REFCOUNT_TREE_FL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1348)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1349) ocfs2_journal_dirty(handle, ref_root_bh);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1350)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1351) trace_ocfs2_expand_inline_ref_root((unsigned long long)blkno,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1352) le16_to_cpu(new_rb->rf_records.rl_used));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1353)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1354) *ref_leaf_bh = new_bh;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1355) new_bh = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1356) out:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1357) brelse(new_bh);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1358) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1359) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1360)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1361) static int ocfs2_refcount_rec_no_intersect(struct ocfs2_refcount_rec *prev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1362) struct ocfs2_refcount_rec *next)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1363) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1364) if (ocfs2_get_ref_rec_low_cpos(prev) + le32_to_cpu(prev->r_clusters) <=
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1365) ocfs2_get_ref_rec_low_cpos(next))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1366) return 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1367)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1368) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1369) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1370)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1371) static int cmp_refcount_rec_by_low_cpos(const void *a, const void *b)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1372) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1373) const struct ocfs2_refcount_rec *l = a, *r = b;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1374) u32 l_cpos = ocfs2_get_ref_rec_low_cpos(l);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1375) u32 r_cpos = ocfs2_get_ref_rec_low_cpos(r);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1376)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1377) if (l_cpos > r_cpos)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1378) return 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1379) if (l_cpos < r_cpos)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1380) return -1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1381) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1382) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1383)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1384) static int cmp_refcount_rec_by_cpos(const void *a, const void *b)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1385) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1386) const struct ocfs2_refcount_rec *l = a, *r = b;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1387) u64 l_cpos = le64_to_cpu(l->r_cpos);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1388) u64 r_cpos = le64_to_cpu(r->r_cpos);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1389)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1390) if (l_cpos > r_cpos)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1391) return 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1392) if (l_cpos < r_cpos)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1393) return -1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1394) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1395) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1396)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1397) static void swap_refcount_rec(void *a, void *b, int size)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1398) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1399) struct ocfs2_refcount_rec *l = a, *r = b;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1400)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1401) swap(*l, *r);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1402) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1403)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1404) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1405) * The refcount cpos are ordered by their 64bit cpos,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1406) * But we will use the low 32 bit to be the e_cpos in the b-tree.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1407) * So we need to make sure that this pos isn't intersected with others.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1408) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1409) * Note: The refcount block is already sorted by their low 32 bit cpos,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1410) * So just try the middle pos first, and we will exit when we find
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1411) * the good position.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1412) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1413) static int ocfs2_find_refcount_split_pos(struct ocfs2_refcount_list *rl,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1414) u32 *split_pos, int *split_index)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1415) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1416) int num_used = le16_to_cpu(rl->rl_used);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1417) int delta, middle = num_used / 2;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1418)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1419) for (delta = 0; delta < middle; delta++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1420) /* Let's check delta earlier than middle */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1421) if (ocfs2_refcount_rec_no_intersect(
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1422) &rl->rl_recs[middle - delta - 1],
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1423) &rl->rl_recs[middle - delta])) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1424) *split_index = middle - delta;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1425) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1426) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1427)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1428) /* For even counts, don't walk off the end */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1429) if ((middle + delta + 1) == num_used)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1430) continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1431)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1432) /* Now try delta past middle */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1433) if (ocfs2_refcount_rec_no_intersect(
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1434) &rl->rl_recs[middle + delta],
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1435) &rl->rl_recs[middle + delta + 1])) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1436) *split_index = middle + delta + 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1437) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1438) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1439) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1440)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1441) if (delta >= middle)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1442) return -ENOSPC;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1443)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1444) *split_pos = ocfs2_get_ref_rec_low_cpos(&rl->rl_recs[*split_index]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1445) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1446) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1447)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1448) static int ocfs2_divide_leaf_refcount_block(struct buffer_head *ref_leaf_bh,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1449) struct buffer_head *new_bh,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1450) u32 *split_cpos)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1451) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1452) int split_index = 0, num_moved, ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1453) u32 cpos = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1454) struct ocfs2_refcount_block *rb =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1455) (struct ocfs2_refcount_block *)ref_leaf_bh->b_data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1456) struct ocfs2_refcount_list *rl = &rb->rf_records;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1457) struct ocfs2_refcount_block *new_rb =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1458) (struct ocfs2_refcount_block *)new_bh->b_data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1459) struct ocfs2_refcount_list *new_rl = &new_rb->rf_records;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1460)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1461) trace_ocfs2_divide_leaf_refcount_block(
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1462) (unsigned long long)ref_leaf_bh->b_blocknr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1463) le16_to_cpu(rl->rl_count), le16_to_cpu(rl->rl_used));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1464)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1465) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1466) * XXX: Improvement later.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1467) * If we know all the high 32 bit cpos is the same, no need to sort.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1468) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1469) * In order to make the whole process safe, we do:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1470) * 1. sort the entries by their low 32 bit cpos first so that we can
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1471) * find the split cpos easily.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1472) * 2. call ocfs2_insert_extent to insert the new refcount block.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1473) * 3. move the refcount rec to the new block.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1474) * 4. sort the entries by their 64 bit cpos.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1475) * 5. dirty the new_rb and rb.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1476) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1477) sort(&rl->rl_recs, le16_to_cpu(rl->rl_used),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1478) sizeof(struct ocfs2_refcount_rec),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1479) cmp_refcount_rec_by_low_cpos, swap_refcount_rec);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1480)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1481) ret = ocfs2_find_refcount_split_pos(rl, &cpos, &split_index);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1482) if (ret) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1483) mlog_errno(ret);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1484) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1485) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1486)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1487) new_rb->rf_cpos = cpu_to_le32(cpos);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1488)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1489) /* move refcount records starting from split_index to the new block. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1490) num_moved = le16_to_cpu(rl->rl_used) - split_index;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1491) memcpy(new_rl->rl_recs, &rl->rl_recs[split_index],
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1492) num_moved * sizeof(struct ocfs2_refcount_rec));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1493)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1494) /*ok, remove the entries we just moved over to the other block. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1495) memset(&rl->rl_recs[split_index], 0,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1496) num_moved * sizeof(struct ocfs2_refcount_rec));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1497)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1498) /* change old and new rl_used accordingly. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1499) le16_add_cpu(&rl->rl_used, -num_moved);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1500) new_rl->rl_used = cpu_to_le16(num_moved);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1501)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1502) sort(&rl->rl_recs, le16_to_cpu(rl->rl_used),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1503) sizeof(struct ocfs2_refcount_rec),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1504) cmp_refcount_rec_by_cpos, swap_refcount_rec);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1505)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1506) sort(&new_rl->rl_recs, le16_to_cpu(new_rl->rl_used),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1507) sizeof(struct ocfs2_refcount_rec),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1508) cmp_refcount_rec_by_cpos, swap_refcount_rec);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1509)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1510) *split_cpos = cpos;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1511) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1512) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1513)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1514) static int ocfs2_new_leaf_refcount_block(handle_t *handle,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1515) struct ocfs2_caching_info *ci,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1516) struct buffer_head *ref_root_bh,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1517) struct buffer_head *ref_leaf_bh,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1518) struct ocfs2_alloc_context *meta_ac)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1519) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1520) int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1521) u16 suballoc_bit_start;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1522) u32 num_got, new_cpos;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1523) u64 suballoc_loc, blkno;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1524) struct super_block *sb = ocfs2_metadata_cache_get_super(ci);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1525) struct ocfs2_refcount_block *root_rb =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1526) (struct ocfs2_refcount_block *)ref_root_bh->b_data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1527) struct buffer_head *new_bh = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1528) struct ocfs2_refcount_block *new_rb;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1529) struct ocfs2_extent_tree ref_et;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1530)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1531) BUG_ON(!(le32_to_cpu(root_rb->rf_flags) & OCFS2_REFCOUNT_TREE_FL));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1532)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1533) ret = ocfs2_journal_access_rb(handle, ci, ref_root_bh,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1534) OCFS2_JOURNAL_ACCESS_WRITE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1535) if (ret) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1536) mlog_errno(ret);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1537) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1538) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1539)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1540) ret = ocfs2_journal_access_rb(handle, ci, ref_leaf_bh,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1541) OCFS2_JOURNAL_ACCESS_WRITE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1542) if (ret) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1543) mlog_errno(ret);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1544) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1545) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1546)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1547) ret = ocfs2_claim_metadata(handle, meta_ac, 1, &suballoc_loc,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1548) &suballoc_bit_start, &num_got,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1549) &blkno);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1550) if (ret) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1551) mlog_errno(ret);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1552) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1553) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1554)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1555) new_bh = sb_getblk(sb, blkno);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1556) if (new_bh == NULL) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1557) ret = -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1558) mlog_errno(ret);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1559) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1560) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1561) ocfs2_set_new_buffer_uptodate(ci, new_bh);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1562)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1563) ret = ocfs2_journal_access_rb(handle, ci, new_bh,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1564) OCFS2_JOURNAL_ACCESS_CREATE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1565) if (ret) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1566) mlog_errno(ret);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1567) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1568) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1569)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1570) /* Initialize ocfs2_refcount_block. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1571) new_rb = (struct ocfs2_refcount_block *)new_bh->b_data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1572) memset(new_rb, 0, sb->s_blocksize);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1573) strcpy((void *)new_rb, OCFS2_REFCOUNT_BLOCK_SIGNATURE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1574) new_rb->rf_suballoc_slot = cpu_to_le16(meta_ac->ac_alloc_slot);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1575) new_rb->rf_suballoc_loc = cpu_to_le64(suballoc_loc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1576) new_rb->rf_suballoc_bit = cpu_to_le16(suballoc_bit_start);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1577) new_rb->rf_fs_generation = cpu_to_le32(OCFS2_SB(sb)->fs_generation);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1578) new_rb->rf_blkno = cpu_to_le64(blkno);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1579) new_rb->rf_parent = cpu_to_le64(ref_root_bh->b_blocknr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1580) new_rb->rf_flags = cpu_to_le32(OCFS2_REFCOUNT_LEAF_FL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1581) new_rb->rf_records.rl_count =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1582) cpu_to_le16(ocfs2_refcount_recs_per_rb(sb));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1583) new_rb->rf_generation = root_rb->rf_generation;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1584)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1585) ret = ocfs2_divide_leaf_refcount_block(ref_leaf_bh, new_bh, &new_cpos);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1586) if (ret) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1587) mlog_errno(ret);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1588) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1589) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1590)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1591) ocfs2_journal_dirty(handle, ref_leaf_bh);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1592) ocfs2_journal_dirty(handle, new_bh);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1593)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1594) ocfs2_init_refcount_extent_tree(&ref_et, ci, ref_root_bh);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1595)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1596) trace_ocfs2_new_leaf_refcount_block(
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1597) (unsigned long long)new_bh->b_blocknr, new_cpos);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1598)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1599) /* Insert the new leaf block with the specific offset cpos. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1600) ret = ocfs2_insert_extent(handle, &ref_et, new_cpos, new_bh->b_blocknr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1601) 1, 0, meta_ac);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1602) if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1603) mlog_errno(ret);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1604)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1605) out:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1606) brelse(new_bh);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1607) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1608) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1609)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1610) static int ocfs2_expand_refcount_tree(handle_t *handle,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1611) struct ocfs2_caching_info *ci,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1612) struct buffer_head *ref_root_bh,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1613) struct buffer_head *ref_leaf_bh,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1614) struct ocfs2_alloc_context *meta_ac)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1615) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1616) int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1617) struct buffer_head *expand_bh = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1618)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1619) if (ref_root_bh == ref_leaf_bh) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1620) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1621) * the old root bh hasn't been expanded to a b-tree,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1622) * so expand it first.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1623) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1624) ret = ocfs2_expand_inline_ref_root(handle, ci, ref_root_bh,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1625) &expand_bh, meta_ac);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1626) if (ret) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1627) mlog_errno(ret);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1628) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1629) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1630) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1631) expand_bh = ref_leaf_bh;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1632) get_bh(expand_bh);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1633) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1634)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1635)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1636) /* Now add a new refcount block into the tree.*/
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1637) ret = ocfs2_new_leaf_refcount_block(handle, ci, ref_root_bh,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1638) expand_bh, meta_ac);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1639) if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1640) mlog_errno(ret);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1641) out:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1642) brelse(expand_bh);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1643) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1644) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1645)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1646) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1647) * Adjust the extent rec in b-tree representing ref_leaf_bh.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1648) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1649) * Only called when we have inserted a new refcount rec at index 0
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1650) * which means ocfs2_extent_rec.e_cpos may need some change.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1651) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1652) static int ocfs2_adjust_refcount_rec(handle_t *handle,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1653) struct ocfs2_caching_info *ci,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1654) struct buffer_head *ref_root_bh,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1655) struct buffer_head *ref_leaf_bh,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1656) struct ocfs2_refcount_rec *rec)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1657) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1658) int ret = 0, i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1659) u32 new_cpos, old_cpos;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1660) struct ocfs2_path *path = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1661) struct ocfs2_extent_tree et;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1662) struct ocfs2_refcount_block *rb =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1663) (struct ocfs2_refcount_block *)ref_root_bh->b_data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1664) struct ocfs2_extent_list *el;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1665)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1666) if (!(le32_to_cpu(rb->rf_flags) & OCFS2_REFCOUNT_TREE_FL))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1667) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1668)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1669) rb = (struct ocfs2_refcount_block *)ref_leaf_bh->b_data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1670) old_cpos = le32_to_cpu(rb->rf_cpos);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1671) new_cpos = le64_to_cpu(rec->r_cpos) & OCFS2_32BIT_POS_MASK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1672) if (old_cpos <= new_cpos)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1673) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1674)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1675) ocfs2_init_refcount_extent_tree(&et, ci, ref_root_bh);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1676)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1677) path = ocfs2_new_path_from_et(&et);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1678) if (!path) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1679) ret = -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1680) mlog_errno(ret);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1681) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1682) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1683)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1684) ret = ocfs2_find_path(ci, path, old_cpos);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1685) if (ret) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1686) mlog_errno(ret);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1687) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1688) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1689)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1690) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1691) * 2 more credits, one for the leaf refcount block, one for
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1692) * the extent block contains the extent rec.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1693) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1694) ret = ocfs2_extend_trans(handle, 2);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1695) if (ret < 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1696) mlog_errno(ret);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1697) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1698) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1699)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1700) ret = ocfs2_journal_access_rb(handle, ci, ref_leaf_bh,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1701) OCFS2_JOURNAL_ACCESS_WRITE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1702) if (ret < 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1703) mlog_errno(ret);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1704) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1705) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1706)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1707) ret = ocfs2_journal_access_eb(handle, ci, path_leaf_bh(path),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1708) OCFS2_JOURNAL_ACCESS_WRITE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1709) if (ret < 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1710) mlog_errno(ret);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1711) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1712) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1713)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1714) /* change the leaf extent block first. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1715) el = path_leaf_el(path);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1716)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1717) for (i = 0; i < le16_to_cpu(el->l_next_free_rec); i++)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1718) if (le32_to_cpu(el->l_recs[i].e_cpos) == old_cpos)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1719) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1720)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1721) BUG_ON(i == le16_to_cpu(el->l_next_free_rec));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1722)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1723) el->l_recs[i].e_cpos = cpu_to_le32(new_cpos);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1724)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1725) /* change the r_cpos in the leaf block. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1726) rb->rf_cpos = cpu_to_le32(new_cpos);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1727)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1728) ocfs2_journal_dirty(handle, path_leaf_bh(path));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1729) ocfs2_journal_dirty(handle, ref_leaf_bh);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1730)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1731) out:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1732) ocfs2_free_path(path);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1733) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1734) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1735)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1736) static int ocfs2_insert_refcount_rec(handle_t *handle,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1737) struct ocfs2_caching_info *ci,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1738) struct buffer_head *ref_root_bh,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1739) struct buffer_head *ref_leaf_bh,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1740) struct ocfs2_refcount_rec *rec,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1741) int index, int merge,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1742) struct ocfs2_alloc_context *meta_ac)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1743) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1744) int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1745) struct ocfs2_refcount_block *rb =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1746) (struct ocfs2_refcount_block *)ref_leaf_bh->b_data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1747) struct ocfs2_refcount_list *rf_list = &rb->rf_records;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1748) struct buffer_head *new_bh = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1749)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1750) BUG_ON(le32_to_cpu(rb->rf_flags) & OCFS2_REFCOUNT_TREE_FL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1751)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1752) if (rf_list->rl_used == rf_list->rl_count) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1753) u64 cpos = le64_to_cpu(rec->r_cpos);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1754) u32 len = le32_to_cpu(rec->r_clusters);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1755)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1756) ret = ocfs2_expand_refcount_tree(handle, ci, ref_root_bh,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1757) ref_leaf_bh, meta_ac);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1758) if (ret) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1759) mlog_errno(ret);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1760) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1761) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1762)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1763) ret = ocfs2_get_refcount_rec(ci, ref_root_bh,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1764) cpos, len, NULL, &index,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1765) &new_bh);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1766) if (ret) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1767) mlog_errno(ret);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1768) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1769) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1770)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1771) ref_leaf_bh = new_bh;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1772) rb = (struct ocfs2_refcount_block *)ref_leaf_bh->b_data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1773) rf_list = &rb->rf_records;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1774) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1775)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1776) ret = ocfs2_journal_access_rb(handle, ci, ref_leaf_bh,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1777) OCFS2_JOURNAL_ACCESS_WRITE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1778) if (ret) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1779) mlog_errno(ret);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1780) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1781) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1782)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1783) if (index < le16_to_cpu(rf_list->rl_used))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1784) memmove(&rf_list->rl_recs[index + 1],
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1785) &rf_list->rl_recs[index],
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1786) (le16_to_cpu(rf_list->rl_used) - index) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1787) sizeof(struct ocfs2_refcount_rec));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1788)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1789) trace_ocfs2_insert_refcount_rec(
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1790) (unsigned long long)ref_leaf_bh->b_blocknr, index,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1791) (unsigned long long)le64_to_cpu(rec->r_cpos),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1792) le32_to_cpu(rec->r_clusters), le32_to_cpu(rec->r_refcount));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1793)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1794) rf_list->rl_recs[index] = *rec;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1795)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1796) le16_add_cpu(&rf_list->rl_used, 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1797)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1798) if (merge)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1799) ocfs2_refcount_rec_merge(rb, index);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1800)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1801) ocfs2_journal_dirty(handle, ref_leaf_bh);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1802)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1803) if (index == 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1804) ret = ocfs2_adjust_refcount_rec(handle, ci,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1805) ref_root_bh,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1806) ref_leaf_bh, rec);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1807) if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1808) mlog_errno(ret);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1809) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1810) out:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1811) brelse(new_bh);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1812) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1813) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1814)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1815) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1816) * Split the refcount_rec indexed by "index" in ref_leaf_bh.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1817) * This is much simple than our b-tree code.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1818) * split_rec is the new refcount rec we want to insert.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1819) * If split_rec->r_refcount > 0, we are changing the refcount(in case we
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1820) * increase refcount or decrease a refcount to non-zero).
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1821) * If split_rec->r_refcount == 0, we are punching a hole in current refcount
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1822) * rec( in case we decrease a refcount to zero).
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1823) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1824) static int ocfs2_split_refcount_rec(handle_t *handle,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1825) struct ocfs2_caching_info *ci,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1826) struct buffer_head *ref_root_bh,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1827) struct buffer_head *ref_leaf_bh,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1828) struct ocfs2_refcount_rec *split_rec,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1829) int index, int merge,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1830) struct ocfs2_alloc_context *meta_ac,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1831) struct ocfs2_cached_dealloc_ctxt *dealloc)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1832) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1833) int ret, recs_need;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1834) u32 len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1835) struct ocfs2_refcount_block *rb =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1836) (struct ocfs2_refcount_block *)ref_leaf_bh->b_data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1837) struct ocfs2_refcount_list *rf_list = &rb->rf_records;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1838) struct ocfs2_refcount_rec *orig_rec = &rf_list->rl_recs[index];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1839) struct ocfs2_refcount_rec *tail_rec = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1840) struct buffer_head *new_bh = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1841)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1842) BUG_ON(le32_to_cpu(rb->rf_flags) & OCFS2_REFCOUNT_TREE_FL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1843)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1844) trace_ocfs2_split_refcount_rec(le64_to_cpu(orig_rec->r_cpos),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1845) le32_to_cpu(orig_rec->r_clusters),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1846) le32_to_cpu(orig_rec->r_refcount),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1847) le64_to_cpu(split_rec->r_cpos),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1848) le32_to_cpu(split_rec->r_clusters),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1849) le32_to_cpu(split_rec->r_refcount));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1850)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1851) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1852) * If we just need to split the header or tail clusters,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1853) * no more recs are needed, just split is OK.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1854) * Otherwise we at least need one new recs.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1855) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1856) if (!split_rec->r_refcount &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1857) (split_rec->r_cpos == orig_rec->r_cpos ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1858) le64_to_cpu(split_rec->r_cpos) +
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1859) le32_to_cpu(split_rec->r_clusters) ==
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1860) le64_to_cpu(orig_rec->r_cpos) + le32_to_cpu(orig_rec->r_clusters)))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1861) recs_need = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1862) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1863) recs_need = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1864)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1865) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1866) * We need one more rec if we split in the middle and the new rec have
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1867) * some refcount in it.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1868) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1869) if (split_rec->r_refcount &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1870) (split_rec->r_cpos != orig_rec->r_cpos &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1871) le64_to_cpu(split_rec->r_cpos) +
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1872) le32_to_cpu(split_rec->r_clusters) !=
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1873) le64_to_cpu(orig_rec->r_cpos) + le32_to_cpu(orig_rec->r_clusters)))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1874) recs_need++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1875)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1876) /* If the leaf block don't have enough record, expand it. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1877) if (le16_to_cpu(rf_list->rl_used) + recs_need >
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1878) le16_to_cpu(rf_list->rl_count)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1879) struct ocfs2_refcount_rec tmp_rec;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1880) u64 cpos = le64_to_cpu(orig_rec->r_cpos);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1881) len = le32_to_cpu(orig_rec->r_clusters);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1882) ret = ocfs2_expand_refcount_tree(handle, ci, ref_root_bh,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1883) ref_leaf_bh, meta_ac);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1884) if (ret) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1885) mlog_errno(ret);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1886) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1887) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1888)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1889) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1890) * We have to re-get it since now cpos may be moved to
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1891) * another leaf block.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1892) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1893) ret = ocfs2_get_refcount_rec(ci, ref_root_bh,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1894) cpos, len, &tmp_rec, &index,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1895) &new_bh);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1896) if (ret) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1897) mlog_errno(ret);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1898) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1899) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1900)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1901) ref_leaf_bh = new_bh;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1902) rb = (struct ocfs2_refcount_block *)ref_leaf_bh->b_data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1903) rf_list = &rb->rf_records;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1904) orig_rec = &rf_list->rl_recs[index];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1905) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1906)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1907) ret = ocfs2_journal_access_rb(handle, ci, ref_leaf_bh,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1908) OCFS2_JOURNAL_ACCESS_WRITE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1909) if (ret) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1910) mlog_errno(ret);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1911) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1912) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1913)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1914) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1915) * We have calculated out how many new records we need and store
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1916) * in recs_need, so spare enough space first by moving the records
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1917) * after "index" to the end.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1918) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1919) if (index != le16_to_cpu(rf_list->rl_used) - 1)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1920) memmove(&rf_list->rl_recs[index + 1 + recs_need],
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1921) &rf_list->rl_recs[index + 1],
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1922) (le16_to_cpu(rf_list->rl_used) - index - 1) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1923) sizeof(struct ocfs2_refcount_rec));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1924)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1925) len = (le64_to_cpu(orig_rec->r_cpos) +
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1926) le32_to_cpu(orig_rec->r_clusters)) -
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1927) (le64_to_cpu(split_rec->r_cpos) +
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1928) le32_to_cpu(split_rec->r_clusters));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1929)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1930) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1931) * If we have "len", the we will split in the tail and move it
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1932) * to the end of the space we have just spared.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1933) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1934) if (len) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1935) tail_rec = &rf_list->rl_recs[index + recs_need];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1936)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1937) memcpy(tail_rec, orig_rec, sizeof(struct ocfs2_refcount_rec));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1938) le64_add_cpu(&tail_rec->r_cpos,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1939) le32_to_cpu(tail_rec->r_clusters) - len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1940) tail_rec->r_clusters = cpu_to_le32(len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1941) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1942)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1943) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1944) * If the split pos isn't the same as the original one, we need to
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1945) * split in the head.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1946) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1947) * Note: We have the chance that split_rec.r_refcount = 0,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1948) * recs_need = 0 and len > 0, which means we just cut the head from
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1949) * the orig_rec and in that case we have done some modification in
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1950) * orig_rec above, so the check for r_cpos is faked.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1951) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1952) if (split_rec->r_cpos != orig_rec->r_cpos && tail_rec != orig_rec) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1953) len = le64_to_cpu(split_rec->r_cpos) -
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1954) le64_to_cpu(orig_rec->r_cpos);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1955) orig_rec->r_clusters = cpu_to_le32(len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1956) index++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1957) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1958)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1959) le16_add_cpu(&rf_list->rl_used, recs_need);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1960)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1961) if (split_rec->r_refcount) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1962) rf_list->rl_recs[index] = *split_rec;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1963) trace_ocfs2_split_refcount_rec_insert(
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1964) (unsigned long long)ref_leaf_bh->b_blocknr, index,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1965) (unsigned long long)le64_to_cpu(split_rec->r_cpos),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1966) le32_to_cpu(split_rec->r_clusters),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1967) le32_to_cpu(split_rec->r_refcount));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1968)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1969) if (merge)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1970) ocfs2_refcount_rec_merge(rb, index);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1971) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1972)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1973) ocfs2_journal_dirty(handle, ref_leaf_bh);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1974)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1975) out:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1976) brelse(new_bh);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1977) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1978) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1979)
/*
 * Increase the refcount of the clusters [cpos, cpos + len) by one,
 * walking the refcount tree rooted at ref_root_bh record by record.
 * If "merge" is set, newly changed records may be merged with their
 * neighbours. meta_ac supplies metadata allocations for tree growth
 * and dealloc collects blocks to free later.
 */
static int __ocfs2_increase_refcount(handle_t *handle,
				     struct ocfs2_caching_info *ci,
				     struct buffer_head *ref_root_bh,
				     u64 cpos, u32 len, int merge,
				     struct ocfs2_alloc_context *meta_ac,
				     struct ocfs2_cached_dealloc_ctxt *dealloc)
{
	int ret = 0, index;
	struct buffer_head *ref_leaf_bh = NULL;
	struct ocfs2_refcount_rec rec;
	unsigned int set_len = 0;	/* clusters handled this iteration */

	trace_ocfs2_increase_refcount_begin(
	     (unsigned long long)ocfs2_metadata_cache_owner(ci),
	     (unsigned long long)cpos, len);

	while (len) {
		/* Find the record (or hole) covering the start of the range. */
		ret = ocfs2_get_refcount_rec(ci, ref_root_bh,
					     cpos, len, &rec, &index,
					     &ref_leaf_bh);
		if (ret) {
			mlog_errno(ret);
			goto out;
		}

		set_len = le32_to_cpu(rec.r_clusters);

		/*
		 * Here we may meet with 3 situations:
		 *
		 * 1. If we find an already existing record, and the length
		 *    is the same, cool, we just need to increase the r_refcount
		 *    and it is OK.
		 * 2. If we find a hole, just insert it with r_refcount = 1.
		 * 3. If we are in the middle of one extent record, split
		 *    it.
		 */
		if (rec.r_refcount && le64_to_cpu(rec.r_cpos) == cpos &&
		    set_len <= len) {
			trace_ocfs2_increase_refcount_change(
				(unsigned long long)cpos, set_len,
				le32_to_cpu(rec.r_refcount));
			ret = ocfs2_change_refcount_rec(handle, ci,
							ref_leaf_bh, index,
							merge, 1);
			if (ret) {
				mlog_errno(ret);
				goto out;
			}
		} else if (!rec.r_refcount) {
			/* Hole: the whole returned range gets refcount 1. */
			rec.r_refcount = cpu_to_le32(1);

			trace_ocfs2_increase_refcount_insert(
			     (unsigned long long)le64_to_cpu(rec.r_cpos),
			     set_len);
			ret = ocfs2_insert_refcount_rec(handle, ci, ref_root_bh,
							ref_leaf_bh,
							&rec, index,
							merge, meta_ac);
			if (ret) {
				mlog_errno(ret);
				goto out;
			}
		} else {
			/*
			 * Partial overlap: clamp set_len to the part of the
			 * record inside [cpos, cpos + len) and split it out
			 * with refcount + 1.
			 */
			set_len = min((u64)(cpos + len),
				      le64_to_cpu(rec.r_cpos) + set_len) - cpos;
			rec.r_cpos = cpu_to_le64(cpos);
			rec.r_clusters = cpu_to_le32(set_len);
			le32_add_cpu(&rec.r_refcount, 1);

			trace_ocfs2_increase_refcount_split(
			     (unsigned long long)le64_to_cpu(rec.r_cpos),
			     set_len, le32_to_cpu(rec.r_refcount));
			ret = ocfs2_split_refcount_rec(handle, ci,
						       ref_root_bh, ref_leaf_bh,
						       &rec, index, merge,
						       meta_ac, dealloc);
			if (ret) {
				mlog_errno(ret);
				goto out;
			}
		}

		/* Advance past the clusters handled above. */
		cpos += set_len;
		len -= set_len;
		brelse(ref_leaf_bh);
		ref_leaf_bh = NULL;
	}

out:
	brelse(ref_leaf_bh);
	return ret;
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2073)
/*
 * Remove an empty leaf refcount block from the refcount tree: unlink its
 * extent from the tree rooted at ref_root_bh, queue the block itself for
 * deallocation, and shrink the root's cluster count. If the root is left
 * with no leaves at all, convert it back into an inline-record root.
 */
static int ocfs2_remove_refcount_extent(handle_t *handle,
				struct ocfs2_caching_info *ci,
				struct buffer_head *ref_root_bh,
				struct buffer_head *ref_leaf_bh,
				struct ocfs2_alloc_context *meta_ac,
				struct ocfs2_cached_dealloc_ctxt *dealloc)
{
	int ret;
	struct super_block *sb = ocfs2_metadata_cache_get_super(ci);
	struct ocfs2_refcount_block *rb =
		(struct ocfs2_refcount_block *)ref_leaf_bh->b_data;
	struct ocfs2_extent_tree et;

	/* Only an empty leaf may be removed. */
	BUG_ON(rb->rf_records.rl_used);

	trace_ocfs2_remove_refcount_extent(
		(unsigned long long)ocfs2_metadata_cache_owner(ci),
		(unsigned long long)ref_leaf_bh->b_blocknr,
		le32_to_cpu(rb->rf_cpos));

	ocfs2_init_refcount_extent_tree(&et, ci, ref_root_bh);
	ret = ocfs2_remove_extent(handle, &et, le32_to_cpu(rb->rf_cpos),
				  1, meta_ac, dealloc);
	if (ret) {
		mlog_errno(ret);
		goto out;
	}

	ocfs2_remove_from_cache(ci, ref_leaf_bh);

	/*
	 * add the freed block to the dealloc so that it will be freed
	 * when we run dealloc.
	 */
	ret = ocfs2_cache_block_dealloc(dealloc, EXTENT_ALLOC_SYSTEM_INODE,
					le16_to_cpu(rb->rf_suballoc_slot),
					le64_to_cpu(rb->rf_suballoc_loc),
					le64_to_cpu(rb->rf_blkno),
					le16_to_cpu(rb->rf_suballoc_bit));
	if (ret) {
		mlog_errno(ret);
		goto out;
	}

	ret = ocfs2_journal_access_rb(handle, ci, ref_root_bh,
				      OCFS2_JOURNAL_ACCESS_WRITE);
	if (ret) {
		mlog_errno(ret);
		goto out;
	}

	/* From here on rb refers to the ROOT block, not the removed leaf. */
	rb = (struct ocfs2_refcount_block *)ref_root_bh->b_data;

	le32_add_cpu(&rb->rf_clusters, -1);

	/*
	 * check whether we need to restore the root refcount block if
	 * there is no leaf extent block at all.
	 */
	if (!rb->rf_list.l_next_free_rec) {
		BUG_ON(rb->rf_clusters);

		trace_ocfs2_restore_refcount_block(
		     (unsigned long long)ref_root_bh->b_blocknr);

		/* Reset the root to hold records inline again. */
		rb->rf_flags = 0;
		rb->rf_parent = 0;
		rb->rf_cpos = 0;
		memset(&rb->rf_records, 0, sb->s_blocksize -
		       offsetof(struct ocfs2_refcount_block, rf_records));
		rb->rf_records.rl_count =
				cpu_to_le16(ocfs2_refcount_recs_per_rb(sb));
	}

	ocfs2_journal_dirty(handle, ref_root_bh);

out:
	return ret;
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2153)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2154) int ocfs2_increase_refcount(handle_t *handle,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2155) struct ocfs2_caching_info *ci,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2156) struct buffer_head *ref_root_bh,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2157) u64 cpos, u32 len,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2158) struct ocfs2_alloc_context *meta_ac,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2159) struct ocfs2_cached_dealloc_ctxt *dealloc)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2160) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2161) return __ocfs2_increase_refcount(handle, ci, ref_root_bh,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2162) cpos, len, 1,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2163) meta_ac, dealloc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2164) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2165)
/*
 * Decrease by one the refcount of the clusters [cpos, cpos + len), which
 * must lie entirely within the record at rl_recs[index] of ref_leaf_bh.
 * A full-record decrement is done in place; a partial one splits the
 * record. If the leaf ends up with no records it is removed from the tree.
 */
static int ocfs2_decrease_refcount_rec(handle_t *handle,
				struct ocfs2_caching_info *ci,
				struct buffer_head *ref_root_bh,
				struct buffer_head *ref_leaf_bh,
				int index, u64 cpos, unsigned int len,
				struct ocfs2_alloc_context *meta_ac,
				struct ocfs2_cached_dealloc_ctxt *dealloc)
{
	int ret;
	struct ocfs2_refcount_block *rb =
			(struct ocfs2_refcount_block *)ref_leaf_bh->b_data;
	struct ocfs2_refcount_rec *rec = &rb->rf_records.rl_recs[index];

	/* The range must be fully covered by the indexed record. */
	BUG_ON(cpos < le64_to_cpu(rec->r_cpos));
	BUG_ON(cpos + len >
	       le64_to_cpu(rec->r_cpos) + le32_to_cpu(rec->r_clusters));

	trace_ocfs2_decrease_refcount_rec(
		(unsigned long long)ocfs2_metadata_cache_owner(ci),
		(unsigned long long)cpos, len);

	if (cpos == le64_to_cpu(rec->r_cpos) &&
	    len == le32_to_cpu(rec->r_clusters))
		/* Exact match: just decrement the record in place. */
		ret = ocfs2_change_refcount_rec(handle, ci,
						ref_leaf_bh, index, 1, -1);
	else {
		/* Partial match: split out [cpos, cpos+len) with count - 1. */
		struct ocfs2_refcount_rec split = *rec;
		split.r_cpos = cpu_to_le64(cpos);
		split.r_clusters = cpu_to_le32(len);

		le32_add_cpu(&split.r_refcount, -1);

		ret = ocfs2_split_refcount_rec(handle, ci,
					       ref_root_bh, ref_leaf_bh,
					       &split, index, 1,
					       meta_ac, dealloc);
	}

	if (ret) {
		mlog_errno(ret);
		goto out;
	}

	/* Remove the leaf refcount block if it contains no refcount record. */
	if (!rb->rf_records.rl_used && ref_leaf_bh != ref_root_bh) {
		ret = ocfs2_remove_refcount_extent(handle, ci, ref_root_bh,
						   ref_leaf_bh, meta_ac,
						   dealloc);
		if (ret)
			mlog_errno(ret);
	}

out:
	return ret;
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2221)
/*
 * Decrease by one the refcount of the clusters [cpos, cpos + len),
 * walking the refcount tree record by record. If "delete" is set,
 * any clusters whose refcount drops from 1 are queued on dealloc for
 * actual freeing; if it is clear, the caller guarantees every record
 * in the range has refcount 1 (BUG otherwise).
 */
static int __ocfs2_decrease_refcount(handle_t *handle,
				     struct ocfs2_caching_info *ci,
				     struct buffer_head *ref_root_bh,
				     u64 cpos, u32 len,
				     struct ocfs2_alloc_context *meta_ac,
				     struct ocfs2_cached_dealloc_ctxt *dealloc,
				     int delete)
{
	int ret = 0, index = 0;
	struct ocfs2_refcount_rec rec;
	unsigned int r_count = 0, r_len;	/* r_len: clusters this pass */
	struct super_block *sb = ocfs2_metadata_cache_get_super(ci);
	struct buffer_head *ref_leaf_bh = NULL;

	trace_ocfs2_decrease_refcount(
		(unsigned long long)ocfs2_metadata_cache_owner(ci),
		(unsigned long long)cpos, len, delete);

	while (len) {
		ret = ocfs2_get_refcount_rec(ci, ref_root_bh,
					     cpos, len, &rec, &index,
					     &ref_leaf_bh);
		if (ret) {
			mlog_errno(ret);
			goto out;
		}

		/* Decreasing past zero, or into a hole, is a caller bug. */
		r_count = le32_to_cpu(rec.r_refcount);
		BUG_ON(r_count == 0);
		if (!delete)
			BUG_ON(r_count > 1);

		/* Clamp to the overlap of the record and [cpos, cpos+len). */
		r_len = min((u64)(cpos + len), le64_to_cpu(rec.r_cpos) +
			    le32_to_cpu(rec.r_clusters)) - cpos;

		ret = ocfs2_decrease_refcount_rec(handle, ci, ref_root_bh,
						  ref_leaf_bh, index,
						  cpos, r_len,
						  meta_ac, dealloc);
		if (ret) {
			mlog_errno(ret);
			goto out;
		}

		/* Refcount fell from 1 to 0: the clusters can be freed. */
		if (le32_to_cpu(rec.r_refcount) == 1 && delete) {
			ret = ocfs2_cache_cluster_dealloc(dealloc,
					  ocfs2_clusters_to_blocks(sb, cpos),
					  r_len);
			if (ret) {
				mlog_errno(ret);
				goto out;
			}
		}

		cpos += r_len;
		len -= r_len;
		brelse(ref_leaf_bh);
		ref_leaf_bh = NULL;
	}

out:
	brelse(ref_leaf_bh);
	return ret;
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2286)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2287) /* Caller must hold refcount tree lock. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2288) int ocfs2_decrease_refcount(struct inode *inode,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2289) handle_t *handle, u32 cpos, u32 len,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2290) struct ocfs2_alloc_context *meta_ac,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2291) struct ocfs2_cached_dealloc_ctxt *dealloc,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2292) int delete)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2293) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2294) int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2295) u64 ref_blkno;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2296) struct buffer_head *ref_root_bh = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2297) struct ocfs2_refcount_tree *tree;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2298)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2299) BUG_ON(!ocfs2_is_refcount_inode(inode));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2300)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2301) ret = ocfs2_get_refcount_block(inode, &ref_blkno);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2302) if (ret) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2303) mlog_errno(ret);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2304) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2305) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2306)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2307) ret = ocfs2_get_refcount_tree(OCFS2_SB(inode->i_sb), ref_blkno, &tree);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2308) if (ret) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2309) mlog_errno(ret);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2310) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2311) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2312)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2313) ret = ocfs2_read_refcount_block(&tree->rf_ci, tree->rf_blkno,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2314) &ref_root_bh);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2315) if (ret) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2316) mlog_errno(ret);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2317) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2318) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2319)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2320) ret = __ocfs2_decrease_refcount(handle, &tree->rf_ci, ref_root_bh,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2321) cpos, len, meta_ac, dealloc, delete);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2322) if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2323) mlog_errno(ret);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2324) out:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2325) brelse(ref_root_bh);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2326) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2327) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2328)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2329) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2330) * Mark the already-existing extent at cpos as refcounted for len clusters.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2331) * This adds the refcount extent flag.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2332) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2333) * If the existing extent is larger than the request, initiate a
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2334) * split. An attempt will be made at merging with adjacent extents.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2335) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2336) * The caller is responsible for passing down meta_ac if we'll need it.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2337) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2338) static int ocfs2_mark_extent_refcounted(struct inode *inode,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2339) struct ocfs2_extent_tree *et,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2340) handle_t *handle, u32 cpos,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2341) u32 len, u32 phys,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2342) struct ocfs2_alloc_context *meta_ac,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2343) struct ocfs2_cached_dealloc_ctxt *dealloc)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2344) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2345) int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2346)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2347) trace_ocfs2_mark_extent_refcounted(OCFS2_I(inode)->ip_blkno,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2348) cpos, len, phys);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2349)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2350) if (!ocfs2_refcount_tree(OCFS2_SB(inode->i_sb))) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2351) ret = ocfs2_error(inode->i_sb, "Inode %lu want to use refcount tree, but the feature bit is not set in the super block\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2352) inode->i_ino);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2353) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2354) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2355)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2356) ret = ocfs2_change_extent_flag(handle, et, cpos,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2357) len, phys, meta_ac, dealloc,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2358) OCFS2_EXT_REFCOUNTED, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2359) if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2360) mlog_errno(ret);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2361)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2362) out:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2363) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2364) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2365)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2366) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2367) * Given some contiguous physical clusters, calculate what we need
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2368) * for modifying their refcount.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2369) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2370) static int ocfs2_calc_refcount_meta_credits(struct super_block *sb,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2371) struct ocfs2_caching_info *ci,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2372) struct buffer_head *ref_root_bh,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2373) u64 start_cpos,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2374) u32 clusters,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2375) int *meta_add,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2376) int *credits)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2377) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2378) int ret = 0, index, ref_blocks = 0, recs_add = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2379) u64 cpos = start_cpos;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2380) struct ocfs2_refcount_block *rb;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2381) struct ocfs2_refcount_rec rec;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2382) struct buffer_head *ref_leaf_bh = NULL, *prev_bh = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2383) u32 len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2384)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2385) while (clusters) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2386) ret = ocfs2_get_refcount_rec(ci, ref_root_bh,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2387) cpos, clusters, &rec,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2388) &index, &ref_leaf_bh);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2389) if (ret) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2390) mlog_errno(ret);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2391) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2392) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2393)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2394) if (ref_leaf_bh != prev_bh) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2395) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2396) * Now we encounter a new leaf block, so calculate
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2397) * whether we need to extend the old leaf.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2398) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2399) if (prev_bh) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2400) rb = (struct ocfs2_refcount_block *)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2401) prev_bh->b_data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2402)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2403) if (le16_to_cpu(rb->rf_records.rl_used) +
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2404) recs_add >
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2405) le16_to_cpu(rb->rf_records.rl_count))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2406) ref_blocks++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2407) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2408)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2409) recs_add = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2410) *credits += 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2411) brelse(prev_bh);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2412) prev_bh = ref_leaf_bh;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2413) get_bh(prev_bh);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2414) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2415)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2416) trace_ocfs2_calc_refcount_meta_credits_iterate(
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2417) recs_add, (unsigned long long)cpos, clusters,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2418) (unsigned long long)le64_to_cpu(rec.r_cpos),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2419) le32_to_cpu(rec.r_clusters),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2420) le32_to_cpu(rec.r_refcount), index);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2421)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2422) len = min((u64)cpos + clusters, le64_to_cpu(rec.r_cpos) +
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2423) le32_to_cpu(rec.r_clusters)) - cpos;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2424) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2425) * We record all the records which will be inserted to the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2426) * same refcount block, so that we can tell exactly whether
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2427) * we need a new refcount block or not.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2428) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2429) * If we will insert a new one, this is easy and only happens
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2430) * during adding refcounted flag to the extent, so we don't
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2431) * have a chance of spliting. We just need one record.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2432) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2433) * If the refcount rec already exists, that would be a little
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2434) * complicated. we may have to:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2435) * 1) split at the beginning if the start pos isn't aligned.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2436) * we need 1 more record in this case.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2437) * 2) split int the end if the end pos isn't aligned.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2438) * we need 1 more record in this case.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2439) * 3) split in the middle because of file system fragmentation.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2440) * we need 2 more records in this case(we can't detect this
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2441) * beforehand, so always think of the worst case).
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2442) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2443) if (rec.r_refcount) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2444) recs_add += 2;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2445) /* Check whether we need a split at the beginning. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2446) if (cpos == start_cpos &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2447) cpos != le64_to_cpu(rec.r_cpos))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2448) recs_add++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2449)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2450) /* Check whether we need a split in the end. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2451) if (cpos + clusters < le64_to_cpu(rec.r_cpos) +
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2452) le32_to_cpu(rec.r_clusters))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2453) recs_add++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2454) } else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2455) recs_add++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2456)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2457) brelse(ref_leaf_bh);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2458) ref_leaf_bh = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2459) clusters -= len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2460) cpos += len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2461) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2462)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2463) if (prev_bh) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2464) rb = (struct ocfs2_refcount_block *)prev_bh->b_data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2465)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2466) if (le16_to_cpu(rb->rf_records.rl_used) + recs_add >
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2467) le16_to_cpu(rb->rf_records.rl_count))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2468) ref_blocks++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2469)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2470) *credits += 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2471) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2472)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2473) if (!ref_blocks)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2474) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2475)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2476) *meta_add += ref_blocks;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2477) *credits += ref_blocks;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2478)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2479) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2480) * So we may need ref_blocks to insert into the tree.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2481) * That also means we need to change the b-tree and add that number
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2482) * of records since we never merge them.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2483) * We need one more block for expansion since the new created leaf
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2484) * block is also full and needs split.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2485) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2486) rb = (struct ocfs2_refcount_block *)ref_root_bh->b_data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2487) if (le32_to_cpu(rb->rf_flags) & OCFS2_REFCOUNT_TREE_FL) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2488) struct ocfs2_extent_tree et;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2489)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2490) ocfs2_init_refcount_extent_tree(&et, ci, ref_root_bh);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2491) *meta_add += ocfs2_extend_meta_needed(et.et_root_el);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2492) *credits += ocfs2_calc_extend_credits(sb,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2493) et.et_root_el);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2494) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2495) *credits += OCFS2_EXPAND_REFCOUNT_TREE_CREDITS;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2496) *meta_add += 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2497) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2498)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2499) out:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2500)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2501) trace_ocfs2_calc_refcount_meta_credits(
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2502) (unsigned long long)start_cpos, clusters,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2503) *meta_add, *credits);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2504) brelse(ref_leaf_bh);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2505) brelse(prev_bh);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2506) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2507) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2508)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2509) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2510) * For refcount tree, we will decrease some contiguous clusters
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2511) * refcount count, so just go through it to see how many blocks
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2512) * we gonna touch and whether we need to create new blocks.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2513) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2514) * Normally the refcount blocks store these refcount should be
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2515) * contiguous also, so that we can get the number easily.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2516) * We will at most add split 2 refcount records and 2 more
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2517) * refcount blocks, so just check it in a rough way.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2518) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2519) * Caller must hold refcount tree lock.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2520) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2521) int ocfs2_prepare_refcount_change_for_del(struct inode *inode,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2522) u64 refcount_loc,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2523) u64 phys_blkno,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2524) u32 clusters,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2525) int *credits,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2526) int *ref_blocks)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2527) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2528) int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2529) struct buffer_head *ref_root_bh = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2530) struct ocfs2_refcount_tree *tree;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2531) u64 start_cpos = ocfs2_blocks_to_clusters(inode->i_sb, phys_blkno);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2532)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2533) if (!ocfs2_refcount_tree(OCFS2_SB(inode->i_sb))) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2534) ret = ocfs2_error(inode->i_sb, "Inode %lu want to use refcount tree, but the feature bit is not set in the super block\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2535) inode->i_ino);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2536) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2537) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2538)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2539) BUG_ON(!ocfs2_is_refcount_inode(inode));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2540)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2541) ret = ocfs2_get_refcount_tree(OCFS2_SB(inode->i_sb),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2542) refcount_loc, &tree);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2543) if (ret) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2544) mlog_errno(ret);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2545) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2546) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2547)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2548) ret = ocfs2_read_refcount_block(&tree->rf_ci, refcount_loc,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2549) &ref_root_bh);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2550) if (ret) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2551) mlog_errno(ret);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2552) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2553) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2554)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2555) ret = ocfs2_calc_refcount_meta_credits(inode->i_sb,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2556) &tree->rf_ci,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2557) ref_root_bh,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2558) start_cpos, clusters,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2559) ref_blocks, credits);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2560) if (ret) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2561) mlog_errno(ret);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2562) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2563) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2564)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2565) trace_ocfs2_prepare_refcount_change_for_del(*ref_blocks, *credits);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2566)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2567) out:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2568) brelse(ref_root_bh);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2569) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2570) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2571)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2572) #define MAX_CONTIG_BYTES 1048576
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2573)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2574) static inline unsigned int ocfs2_cow_contig_clusters(struct super_block *sb)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2575) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2576) return ocfs2_clusters_for_bytes(sb, MAX_CONTIG_BYTES);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2577) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2578)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2579) static inline unsigned int ocfs2_cow_contig_mask(struct super_block *sb)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2580) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2581) return ~(ocfs2_cow_contig_clusters(sb) - 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2582) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2583)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2584) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2585) * Given an extent that starts at 'start' and an I/O that starts at 'cpos',
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2586) * find an offset (start + (n * contig_clusters)) that is closest to cpos
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2587) * while still being less than or equal to it.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2588) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2589) * The goal is to break the extent at a multiple of contig_clusters.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2590) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2591) static inline unsigned int ocfs2_cow_align_start(struct super_block *sb,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2592) unsigned int start,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2593) unsigned int cpos)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2594) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2595) BUG_ON(start > cpos);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2596)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2597) return start + ((cpos - start) & ocfs2_cow_contig_mask(sb));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2598) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2599)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2600) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2601) * Given a cluster count of len, pad it out so that it is a multiple
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2602) * of contig_clusters.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2603) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2604) static inline unsigned int ocfs2_cow_align_length(struct super_block *sb,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2605) unsigned int len)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2606) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2607) unsigned int padded =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2608) (len + (ocfs2_cow_contig_clusters(sb) - 1)) &
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2609) ocfs2_cow_contig_mask(sb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2610)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2611) /* Did we wrap? */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2612) if (padded < len)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2613) padded = UINT_MAX;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2614)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2615) return padded;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2616) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2617)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2618) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2619) * Calculate out the start and number of virtual clusters we need to to CoW.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2620) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2621) * cpos is vitual start cluster position we want to do CoW in a
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2622) * file and write_len is the cluster length.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2623) * max_cpos is the place where we want to stop CoW intentionally.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2624) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2625) * Normal we will start CoW from the beginning of extent record cotaining cpos.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2626) * We try to break up extents on boundaries of MAX_CONTIG_BYTES so that we
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2627) * get good I/O from the resulting extent tree.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2628) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2629) static int ocfs2_refcount_cal_cow_clusters(struct inode *inode,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2630) struct ocfs2_extent_list *el,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2631) u32 cpos,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2632) u32 write_len,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2633) u32 max_cpos,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2634) u32 *cow_start,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2635) u32 *cow_len)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2636) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2637) int ret = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2638) int tree_height = le16_to_cpu(el->l_tree_depth), i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2639) struct buffer_head *eb_bh = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2640) struct ocfs2_extent_block *eb = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2641) struct ocfs2_extent_rec *rec;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2642) unsigned int want_clusters, rec_end = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2643) int contig_clusters = ocfs2_cow_contig_clusters(inode->i_sb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2644) int leaf_clusters;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2645)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2646) BUG_ON(cpos + write_len > max_cpos);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2647)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2648) if (tree_height > 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2649) ret = ocfs2_find_leaf(INODE_CACHE(inode), el, cpos, &eb_bh);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2650) if (ret) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2651) mlog_errno(ret);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2652) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2653) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2654)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2655) eb = (struct ocfs2_extent_block *) eb_bh->b_data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2656) el = &eb->h_list;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2657)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2658) if (el->l_tree_depth) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2659) ret = ocfs2_error(inode->i_sb,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2660) "Inode %lu has non zero tree depth in leaf block %llu\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2661) inode->i_ino,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2662) (unsigned long long)eb_bh->b_blocknr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2663) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2664) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2665) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2666)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2667) *cow_len = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2668) for (i = 0; i < le16_to_cpu(el->l_next_free_rec); i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2669) rec = &el->l_recs[i];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2670)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2671) if (ocfs2_is_empty_extent(rec)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2672) mlog_bug_on_msg(i != 0, "Inode %lu has empty record in "
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2673) "index %d\n", inode->i_ino, i);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2674) continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2675) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2676)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2677) if (le32_to_cpu(rec->e_cpos) +
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2678) le16_to_cpu(rec->e_leaf_clusters) <= cpos)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2679) continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2680)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2681) if (*cow_len == 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2682) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2683) * We should find a refcounted record in the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2684) * first pass.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2685) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2686) BUG_ON(!(rec->e_flags & OCFS2_EXT_REFCOUNTED));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2687) *cow_start = le32_to_cpu(rec->e_cpos);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2688) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2689)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2690) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2691) * If we encounter a hole, a non-refcounted record or
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2692) * pass the max_cpos, stop the search.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2693) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2694) if ((!(rec->e_flags & OCFS2_EXT_REFCOUNTED)) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2695) (*cow_len && rec_end != le32_to_cpu(rec->e_cpos)) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2696) (max_cpos <= le32_to_cpu(rec->e_cpos)))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2697) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2698)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2699) leaf_clusters = le16_to_cpu(rec->e_leaf_clusters);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2700) rec_end = le32_to_cpu(rec->e_cpos) + leaf_clusters;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2701) if (rec_end > max_cpos) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2702) rec_end = max_cpos;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2703) leaf_clusters = rec_end - le32_to_cpu(rec->e_cpos);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2704) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2705)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2706) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2707) * How many clusters do we actually need from
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2708) * this extent? First we see how many we actually
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2709) * need to complete the write. If that's smaller
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2710) * than contig_clusters, we try for contig_clusters.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2711) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2712) if (!*cow_len)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2713) want_clusters = write_len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2714) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2715) want_clusters = (cpos + write_len) -
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2716) (*cow_start + *cow_len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2717) if (want_clusters < contig_clusters)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2718) want_clusters = contig_clusters;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2719)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2720) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2721) * If the write does not cover the whole extent, we
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2722) * need to calculate how we're going to split the extent.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2723) * We try to do it on contig_clusters boundaries.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2724) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2725) * Any extent smaller than contig_clusters will be
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2726) * CoWed in its entirety.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2727) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2728) if (leaf_clusters <= contig_clusters)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2729) *cow_len += leaf_clusters;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2730) else if (*cow_len || (*cow_start == cpos)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2731) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2732) * This extent needs to be CoW'd from its
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2733) * beginning, so all we have to do is compute
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2734) * how many clusters to grab. We align
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2735) * want_clusters to the edge of contig_clusters
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2736) * to get better I/O.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2737) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2738) want_clusters = ocfs2_cow_align_length(inode->i_sb,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2739) want_clusters);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2740)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2741) if (leaf_clusters < want_clusters)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2742) *cow_len += leaf_clusters;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2743) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2744) *cow_len += want_clusters;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2745) } else if ((*cow_start + contig_clusters) >=
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2746) (cpos + write_len)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2747) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2748) * Breaking off contig_clusters at the front
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2749) * of the extent will cover our write. That's
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2750) * easy.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2751) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2752) *cow_len = contig_clusters;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2753) } else if ((rec_end - cpos) <= contig_clusters) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2754) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2755) * Breaking off contig_clusters at the tail of
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2756) * this extent will cover cpos.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2757) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2758) *cow_start = rec_end - contig_clusters;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2759) *cow_len = contig_clusters;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2760) } else if ((rec_end - cpos) <= want_clusters) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2761) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2762) * While we can't fit the entire write in this
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2763) * extent, we know that the write goes from cpos
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2764) * to the end of the extent. Break that off.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2765) * We try to break it at some multiple of
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2766) * contig_clusters from the front of the extent.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2767) * Failing that (ie, cpos is within
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2768) * contig_clusters of the front), we'll CoW the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2769) * entire extent.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2770) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2771) *cow_start = ocfs2_cow_align_start(inode->i_sb,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2772) *cow_start, cpos);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2773) *cow_len = rec_end - *cow_start;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2774) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2775) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2776) * Ok, the entire write lives in the middle of
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2777) * this extent. Let's try to slice the extent up
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2778) * nicely. Optimally, our CoW region starts at
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2779) * m*contig_clusters from the beginning of the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2780) * extent and goes for n*contig_clusters,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2781) * covering the entire write.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2782) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2783) *cow_start = ocfs2_cow_align_start(inode->i_sb,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2784) *cow_start, cpos);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2785)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2786) want_clusters = (cpos + write_len) - *cow_start;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2787) want_clusters = ocfs2_cow_align_length(inode->i_sb,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2788) want_clusters);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2789) if (*cow_start + want_clusters <= rec_end)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2790) *cow_len = want_clusters;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2791) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2792) *cow_len = rec_end - *cow_start;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2793) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2794)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2795) /* Have we covered our entire write yet? */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2796) if ((*cow_start + *cow_len) >= (cpos + write_len))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2797) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2798)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2799) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2800) * If we reach the end of the extent block and don't get enough
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2801) * clusters, continue with the next extent block if possible.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2802) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2803) if (i + 1 == le16_to_cpu(el->l_next_free_rec) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2804) eb && eb->h_next_leaf_blk) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2805) brelse(eb_bh);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2806) eb_bh = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2807)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2808) ret = ocfs2_read_extent_block(INODE_CACHE(inode),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2809) le64_to_cpu(eb->h_next_leaf_blk),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2810) &eb_bh);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2811) if (ret) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2812) mlog_errno(ret);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2813) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2814) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2815)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2816) eb = (struct ocfs2_extent_block *) eb_bh->b_data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2817) el = &eb->h_list;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2818) i = -1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2819) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2820) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2821)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2822) out:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2823) brelse(eb_bh);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2824) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2825) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2826)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2827) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2828) * Prepare meta_ac, data_ac and calculate credits when we want to add some
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2829) * num_clusters in data_tree "et" and change the refcount for the old
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2830) * clusters(starting form p_cluster) in the refcount tree.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2831) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2832) * Note:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2833) * 1. since we may split the old tree, so we at most will need num_clusters + 2
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2834) * more new leaf records.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2835) * 2. In some case, we may not need to reserve new clusters(e.g, reflink), so
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2836) * just give data_ac = NULL.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2837) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2838) static int ocfs2_lock_refcount_allocators(struct super_block *sb,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2839) u32 p_cluster, u32 num_clusters,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2840) struct ocfs2_extent_tree *et,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2841) struct ocfs2_caching_info *ref_ci,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2842) struct buffer_head *ref_root_bh,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2843) struct ocfs2_alloc_context **meta_ac,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2844) struct ocfs2_alloc_context **data_ac,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2845) int *credits)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2846) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2847) int ret = 0, meta_add = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2848) int num_free_extents = ocfs2_num_free_extents(et);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2849)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2850) if (num_free_extents < 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2851) ret = num_free_extents;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2852) mlog_errno(ret);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2853) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2854) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2855)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2856) if (num_free_extents < num_clusters + 2)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2857) meta_add =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2858) ocfs2_extend_meta_needed(et->et_root_el);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2859)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2860) *credits += ocfs2_calc_extend_credits(sb, et->et_root_el);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2861)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2862) ret = ocfs2_calc_refcount_meta_credits(sb, ref_ci, ref_root_bh,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2863) p_cluster, num_clusters,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2864) &meta_add, credits);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2865) if (ret) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2866) mlog_errno(ret);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2867) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2868) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2869)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2870) trace_ocfs2_lock_refcount_allocators(meta_add, *credits);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2871) ret = ocfs2_reserve_new_metadata_blocks(OCFS2_SB(sb), meta_add,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2872) meta_ac);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2873) if (ret) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2874) mlog_errno(ret);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2875) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2876) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2877)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2878) if (data_ac) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2879) ret = ocfs2_reserve_clusters(OCFS2_SB(sb), num_clusters,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2880) data_ac);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2881) if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2882) mlog_errno(ret);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2883) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2884)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2885) out:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2886) if (ret) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2887) if (*meta_ac) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2888) ocfs2_free_alloc_context(*meta_ac);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2889) *meta_ac = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2890) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2891) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2892)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2893) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2894) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2895)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2896) static int ocfs2_clear_cow_buffer(handle_t *handle, struct buffer_head *bh)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2897) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2898) BUG_ON(buffer_dirty(bh));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2899)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2900) clear_buffer_mapped(bh);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2901)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2902) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2903) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2904)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2905) int ocfs2_duplicate_clusters_by_page(handle_t *handle,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2906) struct inode *inode,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2907) u32 cpos, u32 old_cluster,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2908) u32 new_cluster, u32 new_len)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2909) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2910) int ret = 0, partial;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2911) struct super_block *sb = inode->i_sb;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2912) u64 new_block = ocfs2_clusters_to_blocks(sb, new_cluster);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2913) struct page *page;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2914) pgoff_t page_index;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2915) unsigned int from, to;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2916) loff_t offset, end, map_end;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2917) struct address_space *mapping = inode->i_mapping;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2918)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2919) trace_ocfs2_duplicate_clusters_by_page(cpos, old_cluster,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2920) new_cluster, new_len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2921)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2922) offset = ((loff_t)cpos) << OCFS2_SB(sb)->s_clustersize_bits;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2923) end = offset + (new_len << OCFS2_SB(sb)->s_clustersize_bits);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2924) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2925) * We only duplicate pages until we reach the page contains i_size - 1.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2926) * So trim 'end' to i_size.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2927) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2928) if (end > i_size_read(inode))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2929) end = i_size_read(inode);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2930)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2931) while (offset < end) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2932) page_index = offset >> PAGE_SHIFT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2933) map_end = ((loff_t)page_index + 1) << PAGE_SHIFT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2934) if (map_end > end)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2935) map_end = end;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2936)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2937) /* from, to is the offset within the page. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2938) from = offset & (PAGE_SIZE - 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2939) to = PAGE_SIZE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2940) if (map_end & (PAGE_SIZE - 1))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2941) to = map_end & (PAGE_SIZE - 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2942)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2943) retry:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2944) page = find_or_create_page(mapping, page_index, GFP_NOFS);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2945) if (!page) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2946) ret = -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2947) mlog_errno(ret);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2948) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2949) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2950)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2951) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2952) * In case PAGE_SIZE <= CLUSTER_SIZE, we do not expect a dirty
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2953) * page, so write it back.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2954) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2955) if (PAGE_SIZE <= OCFS2_SB(sb)->s_clustersize) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2956) if (PageDirty(page)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2957) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2958) * write_on_page will unlock the page on return
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2959) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2960) ret = write_one_page(page);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2961) goto retry;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2962) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2963) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2964)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2965) if (!PageUptodate(page)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2966) ret = block_read_full_page(page, ocfs2_get_block);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2967) if (ret) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2968) mlog_errno(ret);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2969) goto unlock;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2970) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2971) lock_page(page);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2972) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2973)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2974) if (page_has_buffers(page)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2975) ret = walk_page_buffers(handle, page_buffers(page),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2976) from, to, &partial,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2977) ocfs2_clear_cow_buffer);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2978) if (ret) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2979) mlog_errno(ret);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2980) goto unlock;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2981) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2982) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2983)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2984) ocfs2_map_and_dirty_page(inode,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2985) handle, from, to,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2986) page, 0, &new_block);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2987) mark_page_accessed(page);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2988) unlock:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2989) unlock_page(page);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2990) put_page(page);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2991) page = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2992) offset = map_end;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2993) if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2994) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2995) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2996)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2997) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2998) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2999)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3000) int ocfs2_duplicate_clusters_by_jbd(handle_t *handle,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3001) struct inode *inode,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3002) u32 cpos, u32 old_cluster,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3003) u32 new_cluster, u32 new_len)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3004) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3005) int ret = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3006) struct super_block *sb = inode->i_sb;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3007) struct ocfs2_caching_info *ci = INODE_CACHE(inode);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3008) int i, blocks = ocfs2_clusters_to_blocks(sb, new_len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3009) u64 old_block = ocfs2_clusters_to_blocks(sb, old_cluster);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3010) u64 new_block = ocfs2_clusters_to_blocks(sb, new_cluster);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3011) struct ocfs2_super *osb = OCFS2_SB(sb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3012) struct buffer_head *old_bh = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3013) struct buffer_head *new_bh = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3014)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3015) trace_ocfs2_duplicate_clusters_by_page(cpos, old_cluster,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3016) new_cluster, new_len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3017)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3018) for (i = 0; i < blocks; i++, old_block++, new_block++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3019) new_bh = sb_getblk(osb->sb, new_block);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3020) if (new_bh == NULL) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3021) ret = -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3022) mlog_errno(ret);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3023) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3024) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3025)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3026) ocfs2_set_new_buffer_uptodate(ci, new_bh);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3027)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3028) ret = ocfs2_read_block(ci, old_block, &old_bh, NULL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3029) if (ret) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3030) mlog_errno(ret);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3031) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3032) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3033)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3034) ret = ocfs2_journal_access(handle, ci, new_bh,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3035) OCFS2_JOURNAL_ACCESS_CREATE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3036) if (ret) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3037) mlog_errno(ret);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3038) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3039) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3040)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3041) memcpy(new_bh->b_data, old_bh->b_data, sb->s_blocksize);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3042) ocfs2_journal_dirty(handle, new_bh);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3043)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3044) brelse(new_bh);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3045) brelse(old_bh);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3046) new_bh = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3047) old_bh = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3048) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3049)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3050) brelse(new_bh);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3051) brelse(old_bh);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3052) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3053) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3054)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3055) static int ocfs2_clear_ext_refcount(handle_t *handle,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3056) struct ocfs2_extent_tree *et,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3057) u32 cpos, u32 p_cluster, u32 len,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3058) unsigned int ext_flags,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3059) struct ocfs2_alloc_context *meta_ac,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3060) struct ocfs2_cached_dealloc_ctxt *dealloc)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3061) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3062) int ret, index;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3063) struct ocfs2_extent_rec replace_rec;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3064) struct ocfs2_path *path = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3065) struct ocfs2_extent_list *el;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3066) struct super_block *sb = ocfs2_metadata_cache_get_super(et->et_ci);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3067) u64 ino = ocfs2_metadata_cache_owner(et->et_ci);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3068)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3069) trace_ocfs2_clear_ext_refcount((unsigned long long)ino,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3070) cpos, len, p_cluster, ext_flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3071)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3072) memset(&replace_rec, 0, sizeof(replace_rec));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3073) replace_rec.e_cpos = cpu_to_le32(cpos);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3074) replace_rec.e_leaf_clusters = cpu_to_le16(len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3075) replace_rec.e_blkno = cpu_to_le64(ocfs2_clusters_to_blocks(sb,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3076) p_cluster));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3077) replace_rec.e_flags = ext_flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3078) replace_rec.e_flags &= ~OCFS2_EXT_REFCOUNTED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3079)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3080) path = ocfs2_new_path_from_et(et);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3081) if (!path) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3082) ret = -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3083) mlog_errno(ret);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3084) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3085) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3086)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3087) ret = ocfs2_find_path(et->et_ci, path, cpos);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3088) if (ret) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3089) mlog_errno(ret);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3090) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3091) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3092)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3093) el = path_leaf_el(path);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3094)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3095) index = ocfs2_search_extent_list(el, cpos);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3096) if (index == -1) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3097) ret = ocfs2_error(sb,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3098) "Inode %llu has an extent at cpos %u which can no longer be found\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3099) (unsigned long long)ino, cpos);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3100) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3101) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3102)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3103) ret = ocfs2_split_extent(handle, et, path, index,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3104) &replace_rec, meta_ac, dealloc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3105) if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3106) mlog_errno(ret);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3107)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3108) out:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3109) ocfs2_free_path(path);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3110) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3111) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3112)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3113) static int ocfs2_replace_clusters(handle_t *handle,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3114) struct ocfs2_cow_context *context,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3115) u32 cpos, u32 old,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3116) u32 new, u32 len,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3117) unsigned int ext_flags)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3118) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3119) int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3120) struct ocfs2_caching_info *ci = context->data_et.et_ci;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3121) u64 ino = ocfs2_metadata_cache_owner(ci);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3122)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3123) trace_ocfs2_replace_clusters((unsigned long long)ino,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3124) cpos, old, new, len, ext_flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3125)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3126) /*If the old clusters is unwritten, no need to duplicate. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3127) if (!(ext_flags & OCFS2_EXT_UNWRITTEN)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3128) ret = context->cow_duplicate_clusters(handle, context->inode,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3129) cpos, old, new, len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3130) if (ret) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3131) mlog_errno(ret);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3132) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3133) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3134) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3135)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3136) ret = ocfs2_clear_ext_refcount(handle, &context->data_et,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3137) cpos, new, len, ext_flags,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3138) context->meta_ac, &context->dealloc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3139) if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3140) mlog_errno(ret);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3141) out:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3142) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3143) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3144)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3145) int ocfs2_cow_sync_writeback(struct super_block *sb,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3146) struct inode *inode,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3147) u32 cpos, u32 num_clusters)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3148) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3149) int ret = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3150) loff_t offset, end, map_end;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3151) pgoff_t page_index;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3152) struct page *page;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3153)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3154) if (ocfs2_should_order_data(inode))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3155) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3156)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3157) offset = ((loff_t)cpos) << OCFS2_SB(sb)->s_clustersize_bits;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3158) end = offset + (num_clusters << OCFS2_SB(sb)->s_clustersize_bits);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3159)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3160) ret = filemap_fdatawrite_range(inode->i_mapping,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3161) offset, end - 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3162) if (ret < 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3163) mlog_errno(ret);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3164) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3165) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3166)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3167) while (offset < end) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3168) page_index = offset >> PAGE_SHIFT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3169) map_end = ((loff_t)page_index + 1) << PAGE_SHIFT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3170) if (map_end > end)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3171) map_end = end;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3172)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3173) page = find_or_create_page(inode->i_mapping,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3174) page_index, GFP_NOFS);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3175) BUG_ON(!page);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3176)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3177) wait_on_page_writeback(page);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3178) if (PageError(page)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3179) ret = -EIO;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3180) mlog_errno(ret);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3181) } else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3182) mark_page_accessed(page);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3183)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3184) unlock_page(page);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3185) put_page(page);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3186) page = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3187) offset = map_end;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3188) if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3189) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3190) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3191)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3192) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3193) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3194)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3195) static int ocfs2_di_get_clusters(struct ocfs2_cow_context *context,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3196) u32 v_cluster, u32 *p_cluster,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3197) u32 *num_clusters,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3198) unsigned int *extent_flags)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3199) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3200) return ocfs2_get_clusters(context->inode, v_cluster, p_cluster,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3201) num_clusters, extent_flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3202) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3203)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3204) static int ocfs2_make_clusters_writable(struct super_block *sb,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3205) struct ocfs2_cow_context *context,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3206) u32 cpos, u32 p_cluster,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3207) u32 num_clusters, unsigned int e_flags)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3208) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3209) int ret, delete, index, credits = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3210) u32 new_bit, new_len, orig_num_clusters;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3211) unsigned int set_len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3212) struct ocfs2_super *osb = OCFS2_SB(sb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3213) handle_t *handle;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3214) struct buffer_head *ref_leaf_bh = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3215) struct ocfs2_caching_info *ref_ci = &context->ref_tree->rf_ci;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3216) struct ocfs2_refcount_rec rec;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3217)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3218) trace_ocfs2_make_clusters_writable(cpos, p_cluster,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3219) num_clusters, e_flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3220)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3221) ret = ocfs2_lock_refcount_allocators(sb, p_cluster, num_clusters,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3222) &context->data_et,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3223) ref_ci,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3224) context->ref_root_bh,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3225) &context->meta_ac,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3226) &context->data_ac, &credits);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3227) if (ret) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3228) mlog_errno(ret);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3229) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3230) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3231)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3232) if (context->post_refcount)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3233) credits += context->post_refcount->credits;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3234)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3235) credits += context->extra_credits;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3236) handle = ocfs2_start_trans(osb, credits);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3237) if (IS_ERR(handle)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3238) ret = PTR_ERR(handle);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3239) mlog_errno(ret);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3240) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3241) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3242)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3243) orig_num_clusters = num_clusters;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3244)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3245) while (num_clusters) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3246) ret = ocfs2_get_refcount_rec(ref_ci, context->ref_root_bh,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3247) p_cluster, num_clusters,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3248) &rec, &index, &ref_leaf_bh);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3249) if (ret) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3250) mlog_errno(ret);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3251) goto out_commit;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3252) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3253)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3254) BUG_ON(!rec.r_refcount);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3255) set_len = min((u64)p_cluster + num_clusters,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3256) le64_to_cpu(rec.r_cpos) +
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3257) le32_to_cpu(rec.r_clusters)) - p_cluster;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3258)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3259) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3260) * There are many different situation here.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3261) * 1. If refcount == 1, remove the flag and don't COW.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3262) * 2. If refcount > 1, allocate clusters.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3263) * Here we may not allocate r_len once at a time, so continue
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3264) * until we reach num_clusters.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3265) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3266) if (le32_to_cpu(rec.r_refcount) == 1) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3267) delete = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3268) ret = ocfs2_clear_ext_refcount(handle,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3269) &context->data_et,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3270) cpos, p_cluster,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3271) set_len, e_flags,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3272) context->meta_ac,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3273) &context->dealloc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3274) if (ret) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3275) mlog_errno(ret);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3276) goto out_commit;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3277) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3278) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3279) delete = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3280)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3281) ret = __ocfs2_claim_clusters(handle,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3282) context->data_ac,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3283) 1, set_len,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3284) &new_bit, &new_len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3285) if (ret) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3286) mlog_errno(ret);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3287) goto out_commit;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3288) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3289)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3290) ret = ocfs2_replace_clusters(handle, context,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3291) cpos, p_cluster, new_bit,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3292) new_len, e_flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3293) if (ret) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3294) mlog_errno(ret);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3295) goto out_commit;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3296) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3297) set_len = new_len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3298) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3299)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3300) ret = __ocfs2_decrease_refcount(handle, ref_ci,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3301) context->ref_root_bh,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3302) p_cluster, set_len,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3303) context->meta_ac,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3304) &context->dealloc, delete);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3305) if (ret) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3306) mlog_errno(ret);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3307) goto out_commit;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3308) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3309)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3310) cpos += set_len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3311) p_cluster += set_len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3312) num_clusters -= set_len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3313) brelse(ref_leaf_bh);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3314) ref_leaf_bh = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3315) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3316)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3317) /* handle any post_cow action. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3318) if (context->post_refcount && context->post_refcount->func) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3319) ret = context->post_refcount->func(context->inode, handle,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3320) context->post_refcount->para);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3321) if (ret) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3322) mlog_errno(ret);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3323) goto out_commit;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3324) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3325) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3326)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3327) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3328) * Here we should write the new page out first if we are
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3329) * in write-back mode.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3330) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3331) if (context->get_clusters == ocfs2_di_get_clusters) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3332) ret = ocfs2_cow_sync_writeback(sb, context->inode, cpos,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3333) orig_num_clusters);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3334) if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3335) mlog_errno(ret);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3336) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3337)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3338) out_commit:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3339) ocfs2_commit_trans(osb, handle);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3340)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3341) out:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3342) if (context->data_ac) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3343) ocfs2_free_alloc_context(context->data_ac);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3344) context->data_ac = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3345) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3346) if (context->meta_ac) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3347) ocfs2_free_alloc_context(context->meta_ac);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3348) context->meta_ac = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3349) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3350) brelse(ref_leaf_bh);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3351)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3352) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3353) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3354)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3355) static int ocfs2_replace_cow(struct ocfs2_cow_context *context)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3356) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3357) int ret = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3358) struct inode *inode = context->inode;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3359) u32 cow_start = context->cow_start, cow_len = context->cow_len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3360) u32 p_cluster, num_clusters;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3361) unsigned int ext_flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3362) struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3363)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3364) if (!ocfs2_refcount_tree(osb)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3365) return ocfs2_error(inode->i_sb, "Inode %lu want to use refcount tree, but the feature bit is not set in the super block\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3366) inode->i_ino);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3367) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3368)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3369) ocfs2_init_dealloc_ctxt(&context->dealloc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3370)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3371) while (cow_len) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3372) ret = context->get_clusters(context, cow_start, &p_cluster,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3373) &num_clusters, &ext_flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3374) if (ret) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3375) mlog_errno(ret);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3376) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3377) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3378)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3379) BUG_ON(!(ext_flags & OCFS2_EXT_REFCOUNTED));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3380)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3381) if (cow_len < num_clusters)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3382) num_clusters = cow_len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3383)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3384) ret = ocfs2_make_clusters_writable(inode->i_sb, context,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3385) cow_start, p_cluster,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3386) num_clusters, ext_flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3387) if (ret) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3388) mlog_errno(ret);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3389) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3390) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3391)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3392) cow_len -= num_clusters;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3393) cow_start += num_clusters;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3394) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3395)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3396) if (ocfs2_dealloc_has_cluster(&context->dealloc)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3397) ocfs2_schedule_truncate_log_flush(osb, 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3398) ocfs2_run_deallocs(osb, &context->dealloc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3399) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3400)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3401) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3402) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3403)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3404) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3405) * Starting at cpos, try to CoW write_len clusters. Don't CoW
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3406) * past max_cpos. This will stop when it runs into a hole or an
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3407) * unrefcounted extent.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3408) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3409) static int ocfs2_refcount_cow_hunk(struct inode *inode,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3410) struct buffer_head *di_bh,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3411) u32 cpos, u32 write_len, u32 max_cpos)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3412) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3413) int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3414) u32 cow_start = 0, cow_len = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3415) struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3416) struct ocfs2_dinode *di = (struct ocfs2_dinode *)di_bh->b_data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3417) struct buffer_head *ref_root_bh = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3418) struct ocfs2_refcount_tree *ref_tree;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3419) struct ocfs2_cow_context *context = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3420)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3421) BUG_ON(!ocfs2_is_refcount_inode(inode));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3422)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3423) ret = ocfs2_refcount_cal_cow_clusters(inode, &di->id2.i_list,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3424) cpos, write_len, max_cpos,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3425) &cow_start, &cow_len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3426) if (ret) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3427) mlog_errno(ret);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3428) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3429) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3430)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3431) trace_ocfs2_refcount_cow_hunk(OCFS2_I(inode)->ip_blkno,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3432) cpos, write_len, max_cpos,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3433) cow_start, cow_len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3434)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3435) BUG_ON(cow_len == 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3436)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3437) context = kzalloc(sizeof(struct ocfs2_cow_context), GFP_NOFS);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3438) if (!context) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3439) ret = -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3440) mlog_errno(ret);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3441) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3442) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3443)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3444) ret = ocfs2_lock_refcount_tree(osb, le64_to_cpu(di->i_refcount_loc),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3445) 1, &ref_tree, &ref_root_bh);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3446) if (ret) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3447) mlog_errno(ret);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3448) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3449) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3450)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3451) context->inode = inode;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3452) context->cow_start = cow_start;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3453) context->cow_len = cow_len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3454) context->ref_tree = ref_tree;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3455) context->ref_root_bh = ref_root_bh;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3456) context->cow_duplicate_clusters = ocfs2_duplicate_clusters_by_page;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3457) context->get_clusters = ocfs2_di_get_clusters;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3458)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3459) ocfs2_init_dinode_extent_tree(&context->data_et,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3460) INODE_CACHE(inode), di_bh);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3461)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3462) ret = ocfs2_replace_cow(context);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3463) if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3464) mlog_errno(ret);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3465)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3466) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3467) * truncate the extent map here since no matter whether we meet with
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3468) * any error during the action, we shouldn't trust cached extent map
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3469) * any more.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3470) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3471) ocfs2_extent_map_trunc(inode, cow_start);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3472)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3473) ocfs2_unlock_refcount_tree(osb, ref_tree, 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3474) brelse(ref_root_bh);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3475) out:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3476) kfree(context);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3477) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3478) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3479)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3480) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3481) * CoW any and all clusters between cpos and cpos+write_len.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3482) * Don't CoW past max_cpos. If this returns successfully, all
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3483) * clusters between cpos and cpos+write_len are safe to modify.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3484) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3485) int ocfs2_refcount_cow(struct inode *inode,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3486) struct buffer_head *di_bh,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3487) u32 cpos, u32 write_len, u32 max_cpos)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3488) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3489) int ret = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3490) u32 p_cluster, num_clusters;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3491) unsigned int ext_flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3492)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3493) while (write_len) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3494) ret = ocfs2_get_clusters(inode, cpos, &p_cluster,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3495) &num_clusters, &ext_flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3496) if (ret) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3497) mlog_errno(ret);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3498) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3499) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3500)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3501) if (write_len < num_clusters)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3502) num_clusters = write_len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3503)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3504) if (ext_flags & OCFS2_EXT_REFCOUNTED) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3505) ret = ocfs2_refcount_cow_hunk(inode, di_bh, cpos,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3506) num_clusters, max_cpos);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3507) if (ret) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3508) mlog_errno(ret);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3509) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3510) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3511) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3512)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3513) write_len -= num_clusters;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3514) cpos += num_clusters;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3515) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3516)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3517) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3518) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3519)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3520) static int ocfs2_xattr_value_get_clusters(struct ocfs2_cow_context *context,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3521) u32 v_cluster, u32 *p_cluster,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3522) u32 *num_clusters,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3523) unsigned int *extent_flags)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3524) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3525) struct inode *inode = context->inode;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3526) struct ocfs2_xattr_value_root *xv = context->cow_object;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3527)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3528) return ocfs2_xattr_get_clusters(inode, v_cluster, p_cluster,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3529) num_clusters, &xv->xr_list,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3530) extent_flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3531) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3532)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3533) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3534) * Given a xattr value root, calculate the most meta/credits we need for
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3535) * refcount tree change if we truncate it to 0.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3536) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3537) int ocfs2_refcounted_xattr_delete_need(struct inode *inode,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3538) struct ocfs2_caching_info *ref_ci,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3539) struct buffer_head *ref_root_bh,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3540) struct ocfs2_xattr_value_root *xv,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3541) int *meta_add, int *credits)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3542) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3543) int ret = 0, index, ref_blocks = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3544) u32 p_cluster, num_clusters;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3545) u32 cpos = 0, clusters = le32_to_cpu(xv->xr_clusters);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3546) struct ocfs2_refcount_block *rb;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3547) struct ocfs2_refcount_rec rec;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3548) struct buffer_head *ref_leaf_bh = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3549)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3550) while (cpos < clusters) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3551) ret = ocfs2_xattr_get_clusters(inode, cpos, &p_cluster,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3552) &num_clusters, &xv->xr_list,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3553) NULL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3554) if (ret) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3555) mlog_errno(ret);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3556) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3557) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3558)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3559) cpos += num_clusters;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3560)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3561) while (num_clusters) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3562) ret = ocfs2_get_refcount_rec(ref_ci, ref_root_bh,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3563) p_cluster, num_clusters,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3564) &rec, &index,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3565) &ref_leaf_bh);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3566) if (ret) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3567) mlog_errno(ret);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3568) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3569) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3570)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3571) BUG_ON(!rec.r_refcount);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3572)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3573) rb = (struct ocfs2_refcount_block *)ref_leaf_bh->b_data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3574)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3575) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3576) * We really don't know whether the other clusters is in
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3577) * this refcount block or not, so just take the worst
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3578) * case that all the clusters are in this block and each
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3579) * one will split a refcount rec, so totally we need
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3580) * clusters * 2 new refcount rec.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3581) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3582) if (le16_to_cpu(rb->rf_records.rl_used) + clusters * 2 >
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3583) le16_to_cpu(rb->rf_records.rl_count))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3584) ref_blocks++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3585)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3586) *credits += 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3587) brelse(ref_leaf_bh);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3588) ref_leaf_bh = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3589)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3590) if (num_clusters <= le32_to_cpu(rec.r_clusters))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3591) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3592) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3593) num_clusters -= le32_to_cpu(rec.r_clusters);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3594) p_cluster += num_clusters;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3595) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3596) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3597)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3598) *meta_add += ref_blocks;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3599) if (!ref_blocks)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3600) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3601)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3602) rb = (struct ocfs2_refcount_block *)ref_root_bh->b_data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3603) if (le32_to_cpu(rb->rf_flags) & OCFS2_REFCOUNT_TREE_FL)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3604) *credits += OCFS2_EXPAND_REFCOUNT_TREE_CREDITS;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3605) else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3606) struct ocfs2_extent_tree et;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3607)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3608) ocfs2_init_refcount_extent_tree(&et, ref_ci, ref_root_bh);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3609) *credits += ocfs2_calc_extend_credits(inode->i_sb,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3610) et.et_root_el);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3611) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3612)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3613) out:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3614) brelse(ref_leaf_bh);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3615) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3616) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3617)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3618) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3619) * Do CoW for xattr.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3620) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3621) int ocfs2_refcount_cow_xattr(struct inode *inode,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3622) struct ocfs2_dinode *di,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3623) struct ocfs2_xattr_value_buf *vb,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3624) struct ocfs2_refcount_tree *ref_tree,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3625) struct buffer_head *ref_root_bh,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3626) u32 cpos, u32 write_len,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3627) struct ocfs2_post_refcount *post)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3628) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3629) int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3630) struct ocfs2_xattr_value_root *xv = vb->vb_xv;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3631) struct ocfs2_cow_context *context = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3632) u32 cow_start, cow_len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3633)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3634) BUG_ON(!ocfs2_is_refcount_inode(inode));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3635)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3636) ret = ocfs2_refcount_cal_cow_clusters(inode, &xv->xr_list,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3637) cpos, write_len, UINT_MAX,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3638) &cow_start, &cow_len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3639) if (ret) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3640) mlog_errno(ret);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3641) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3642) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3643)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3644) BUG_ON(cow_len == 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3645)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3646) context = kzalloc(sizeof(struct ocfs2_cow_context), GFP_NOFS);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3647) if (!context) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3648) ret = -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3649) mlog_errno(ret);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3650) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3651) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3652)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3653) context->inode = inode;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3654) context->cow_start = cow_start;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3655) context->cow_len = cow_len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3656) context->ref_tree = ref_tree;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3657) context->ref_root_bh = ref_root_bh;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3658) context->cow_object = xv;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3659)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3660) context->cow_duplicate_clusters = ocfs2_duplicate_clusters_by_jbd;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3661) /* We need the extra credits for duplicate_clusters by jbd. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3662) context->extra_credits =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3663) ocfs2_clusters_to_blocks(inode->i_sb, 1) * cow_len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3664) context->get_clusters = ocfs2_xattr_value_get_clusters;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3665) context->post_refcount = post;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3666)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3667) ocfs2_init_xattr_value_extent_tree(&context->data_et,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3668) INODE_CACHE(inode), vb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3669)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3670) ret = ocfs2_replace_cow(context);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3671) if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3672) mlog_errno(ret);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3673)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3674) out:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3675) kfree(context);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3676) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3677) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3678)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3679) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3680) * Insert a new extent into refcount tree and mark a extent rec
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3681) * as refcounted in the dinode tree.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3682) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3683) int ocfs2_add_refcount_flag(struct inode *inode,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3684) struct ocfs2_extent_tree *data_et,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3685) struct ocfs2_caching_info *ref_ci,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3686) struct buffer_head *ref_root_bh,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3687) u32 cpos, u32 p_cluster, u32 num_clusters,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3688) struct ocfs2_cached_dealloc_ctxt *dealloc,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3689) struct ocfs2_post_refcount *post)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3690) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3691) int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3692) handle_t *handle;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3693) int credits = 1, ref_blocks = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3694) struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3695) struct ocfs2_alloc_context *meta_ac = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3696)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3697) /* We need to be able to handle at least an extent tree split. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3698) ref_blocks = ocfs2_extend_meta_needed(data_et->et_root_el);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3699)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3700) ret = ocfs2_calc_refcount_meta_credits(inode->i_sb,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3701) ref_ci, ref_root_bh,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3702) p_cluster, num_clusters,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3703) &ref_blocks, &credits);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3704) if (ret) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3705) mlog_errno(ret);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3706) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3707) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3708)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3709) trace_ocfs2_add_refcount_flag(ref_blocks, credits);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3710)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3711) if (ref_blocks) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3712) ret = ocfs2_reserve_new_metadata_blocks(osb,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3713) ref_blocks, &meta_ac);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3714) if (ret) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3715) mlog_errno(ret);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3716) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3717) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3718) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3719)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3720) if (post)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3721) credits += post->credits;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3722)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3723) handle = ocfs2_start_trans(osb, credits);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3724) if (IS_ERR(handle)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3725) ret = PTR_ERR(handle);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3726) mlog_errno(ret);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3727) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3728) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3729)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3730) ret = ocfs2_mark_extent_refcounted(inode, data_et, handle,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3731) cpos, num_clusters, p_cluster,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3732) meta_ac, dealloc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3733) if (ret) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3734) mlog_errno(ret);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3735) goto out_commit;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3736) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3737)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3738) ret = __ocfs2_increase_refcount(handle, ref_ci, ref_root_bh,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3739) p_cluster, num_clusters, 0,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3740) meta_ac, dealloc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3741) if (ret) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3742) mlog_errno(ret);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3743) goto out_commit;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3744) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3745)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3746) if (post && post->func) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3747) ret = post->func(inode, handle, post->para);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3748) if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3749) mlog_errno(ret);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3750) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3751)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3752) out_commit:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3753) ocfs2_commit_trans(osb, handle);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3754) out:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3755) if (meta_ac)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3756) ocfs2_free_alloc_context(meta_ac);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3757) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3758) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3759)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3760) static int ocfs2_change_ctime(struct inode *inode,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3761) struct buffer_head *di_bh)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3762) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3763) int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3764) handle_t *handle;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3765) struct ocfs2_dinode *di = (struct ocfs2_dinode *)di_bh->b_data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3766)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3767) handle = ocfs2_start_trans(OCFS2_SB(inode->i_sb),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3768) OCFS2_INODE_UPDATE_CREDITS);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3769) if (IS_ERR(handle)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3770) ret = PTR_ERR(handle);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3771) mlog_errno(ret);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3772) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3773) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3774)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3775) ret = ocfs2_journal_access_di(handle, INODE_CACHE(inode), di_bh,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3776) OCFS2_JOURNAL_ACCESS_WRITE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3777) if (ret) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3778) mlog_errno(ret);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3779) goto out_commit;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3780) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3781)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3782) inode->i_ctime = current_time(inode);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3783) di->i_ctime = cpu_to_le64(inode->i_ctime.tv_sec);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3784) di->i_ctime_nsec = cpu_to_le32(inode->i_ctime.tv_nsec);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3785)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3786) ocfs2_journal_dirty(handle, di_bh);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3787)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3788) out_commit:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3789) ocfs2_commit_trans(OCFS2_SB(inode->i_sb), handle);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3790) out:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3791) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3792) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3793)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3794) static int ocfs2_attach_refcount_tree(struct inode *inode,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3795) struct buffer_head *di_bh)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3796) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3797) int ret, data_changed = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3798) struct buffer_head *ref_root_bh = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3799) struct ocfs2_inode_info *oi = OCFS2_I(inode);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3800) struct ocfs2_dinode *di = (struct ocfs2_dinode *)di_bh->b_data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3801) struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3802) struct ocfs2_refcount_tree *ref_tree;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3803) unsigned int ext_flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3804) loff_t size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3805) u32 cpos, num_clusters, clusters, p_cluster;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3806) struct ocfs2_cached_dealloc_ctxt dealloc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3807) struct ocfs2_extent_tree di_et;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3808)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3809) ocfs2_init_dealloc_ctxt(&dealloc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3810)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3811) if (!ocfs2_is_refcount_inode(inode)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3812) ret = ocfs2_create_refcount_tree(inode, di_bh);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3813) if (ret) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3814) mlog_errno(ret);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3815) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3816) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3817) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3818)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3819) BUG_ON(!di->i_refcount_loc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3820) ret = ocfs2_lock_refcount_tree(osb,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3821) le64_to_cpu(di->i_refcount_loc), 1,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3822) &ref_tree, &ref_root_bh);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3823) if (ret) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3824) mlog_errno(ret);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3825) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3826) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3827)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3828) if (oi->ip_dyn_features & OCFS2_INLINE_DATA_FL)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3829) goto attach_xattr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3830)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3831) ocfs2_init_dinode_extent_tree(&di_et, INODE_CACHE(inode), di_bh);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3832)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3833) size = i_size_read(inode);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3834) clusters = ocfs2_clusters_for_bytes(inode->i_sb, size);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3835)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3836) cpos = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3837) while (cpos < clusters) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3838) ret = ocfs2_get_clusters(inode, cpos, &p_cluster,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3839) &num_clusters, &ext_flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3840) if (ret) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3841) mlog_errno(ret);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3842) goto unlock;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3843) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3844) if (p_cluster && !(ext_flags & OCFS2_EXT_REFCOUNTED)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3845) ret = ocfs2_add_refcount_flag(inode, &di_et,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3846) &ref_tree->rf_ci,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3847) ref_root_bh, cpos,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3848) p_cluster, num_clusters,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3849) &dealloc, NULL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3850) if (ret) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3851) mlog_errno(ret);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3852) goto unlock;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3853) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3854)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3855) data_changed = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3856) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3857) cpos += num_clusters;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3858) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3859)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3860) attach_xattr:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3861) if (oi->ip_dyn_features & OCFS2_HAS_XATTR_FL) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3862) ret = ocfs2_xattr_attach_refcount_tree(inode, di_bh,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3863) &ref_tree->rf_ci,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3864) ref_root_bh,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3865) &dealloc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3866) if (ret) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3867) mlog_errno(ret);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3868) goto unlock;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3869) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3870) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3871)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3872) if (data_changed) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3873) ret = ocfs2_change_ctime(inode, di_bh);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3874) if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3875) mlog_errno(ret);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3876) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3877)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3878) unlock:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3879) ocfs2_unlock_refcount_tree(osb, ref_tree, 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3880) brelse(ref_root_bh);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3881)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3882) if (!ret && ocfs2_dealloc_has_cluster(&dealloc)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3883) ocfs2_schedule_truncate_log_flush(osb, 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3884) ocfs2_run_deallocs(osb, &dealloc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3885) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3886) out:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3887) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3888) * Empty the extent map so that we may get the right extent
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3889) * record from the disk.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3890) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3891) ocfs2_extent_map_trunc(inode, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3892)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3893) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3894) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3895)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3896) static int ocfs2_add_refcounted_extent(struct inode *inode,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3897) struct ocfs2_extent_tree *et,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3898) struct ocfs2_caching_info *ref_ci,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3899) struct buffer_head *ref_root_bh,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3900) u32 cpos, u32 p_cluster, u32 num_clusters,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3901) unsigned int ext_flags,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3902) struct ocfs2_cached_dealloc_ctxt *dealloc)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3903) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3904) int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3905) handle_t *handle;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3906) int credits = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3907) struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3908) struct ocfs2_alloc_context *meta_ac = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3909)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3910) ret = ocfs2_lock_refcount_allocators(inode->i_sb,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3911) p_cluster, num_clusters,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3912) et, ref_ci,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3913) ref_root_bh, &meta_ac,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3914) NULL, &credits);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3915) if (ret) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3916) mlog_errno(ret);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3917) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3918) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3919)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3920) handle = ocfs2_start_trans(osb, credits);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3921) if (IS_ERR(handle)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3922) ret = PTR_ERR(handle);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3923) mlog_errno(ret);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3924) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3925) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3926)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3927) ret = ocfs2_insert_extent(handle, et, cpos,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3928) ocfs2_clusters_to_blocks(inode->i_sb, p_cluster),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3929) num_clusters, ext_flags, meta_ac);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3930) if (ret) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3931) mlog_errno(ret);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3932) goto out_commit;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3933) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3934)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3935) ret = ocfs2_increase_refcount(handle, ref_ci, ref_root_bh,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3936) p_cluster, num_clusters,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3937) meta_ac, dealloc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3938) if (ret) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3939) mlog_errno(ret);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3940) goto out_commit;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3941) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3942)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3943) ret = dquot_alloc_space_nodirty(inode,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3944) ocfs2_clusters_to_bytes(osb->sb, num_clusters));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3945) if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3946) mlog_errno(ret);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3947)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3948) out_commit:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3949) ocfs2_commit_trans(osb, handle);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3950) out:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3951) if (meta_ac)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3952) ocfs2_free_alloc_context(meta_ac);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3953) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3954) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3955)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3956) static int ocfs2_duplicate_inline_data(struct inode *s_inode,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3957) struct buffer_head *s_bh,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3958) struct inode *t_inode,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3959) struct buffer_head *t_bh)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3960) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3961) int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3962) handle_t *handle;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3963) struct ocfs2_super *osb = OCFS2_SB(s_inode->i_sb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3964) struct ocfs2_dinode *s_di = (struct ocfs2_dinode *)s_bh->b_data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3965) struct ocfs2_dinode *t_di = (struct ocfs2_dinode *)t_bh->b_data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3966)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3967) BUG_ON(!(OCFS2_I(s_inode)->ip_dyn_features & OCFS2_INLINE_DATA_FL));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3968)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3969) handle = ocfs2_start_trans(osb, OCFS2_INODE_UPDATE_CREDITS);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3970) if (IS_ERR(handle)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3971) ret = PTR_ERR(handle);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3972) mlog_errno(ret);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3973) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3974) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3975)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3976) ret = ocfs2_journal_access_di(handle, INODE_CACHE(t_inode), t_bh,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3977) OCFS2_JOURNAL_ACCESS_WRITE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3978) if (ret) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3979) mlog_errno(ret);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3980) goto out_commit;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3981) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3982)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3983) t_di->id2.i_data.id_count = s_di->id2.i_data.id_count;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3984) memcpy(t_di->id2.i_data.id_data, s_di->id2.i_data.id_data,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3985) le16_to_cpu(s_di->id2.i_data.id_count));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3986) spin_lock(&OCFS2_I(t_inode)->ip_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3987) OCFS2_I(t_inode)->ip_dyn_features |= OCFS2_INLINE_DATA_FL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3988) t_di->i_dyn_features = cpu_to_le16(OCFS2_I(t_inode)->ip_dyn_features);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3989) spin_unlock(&OCFS2_I(t_inode)->ip_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3990)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3991) ocfs2_journal_dirty(handle, t_bh);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3992)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3993) out_commit:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3994) ocfs2_commit_trans(osb, handle);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3995) out:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3996) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3997) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3998)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3999) static int ocfs2_duplicate_extent_list(struct inode *s_inode,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4000) struct inode *t_inode,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4001) struct buffer_head *t_bh,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4002) struct ocfs2_caching_info *ref_ci,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4003) struct buffer_head *ref_root_bh,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4004) struct ocfs2_cached_dealloc_ctxt *dealloc)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4005) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4006) int ret = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4007) u32 p_cluster, num_clusters, clusters, cpos;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4008) loff_t size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4009) unsigned int ext_flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4010) struct ocfs2_extent_tree et;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4011)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4012) ocfs2_init_dinode_extent_tree(&et, INODE_CACHE(t_inode), t_bh);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4013)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4014) size = i_size_read(s_inode);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4015) clusters = ocfs2_clusters_for_bytes(s_inode->i_sb, size);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4016)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4017) cpos = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4018) while (cpos < clusters) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4019) ret = ocfs2_get_clusters(s_inode, cpos, &p_cluster,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4020) &num_clusters, &ext_flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4021) if (ret) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4022) mlog_errno(ret);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4023) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4024) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4025) if (p_cluster) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4026) ret = ocfs2_add_refcounted_extent(t_inode, &et,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4027) ref_ci, ref_root_bh,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4028) cpos, p_cluster,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4029) num_clusters,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4030) ext_flags,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4031) dealloc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4032) if (ret) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4033) mlog_errno(ret);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4034) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4035) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4036) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4037)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4038) cpos += num_clusters;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4039) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4040)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4041) out:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4042) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4043) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4044)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4045) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4046) * change the new file's attributes to the src.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4047) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4048) * reflink creates a snapshot of a file, that means the attributes
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4049) * must be identical except for three exceptions - nlink, ino, and ctime.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4050) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4051) static int ocfs2_complete_reflink(struct inode *s_inode,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4052) struct buffer_head *s_bh,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4053) struct inode *t_inode,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4054) struct buffer_head *t_bh,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4055) bool preserve)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4056) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4057) int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4058) handle_t *handle;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4059) struct ocfs2_dinode *s_di = (struct ocfs2_dinode *)s_bh->b_data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4060) struct ocfs2_dinode *di = (struct ocfs2_dinode *)t_bh->b_data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4061) loff_t size = i_size_read(s_inode);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4062)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4063) handle = ocfs2_start_trans(OCFS2_SB(t_inode->i_sb),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4064) OCFS2_INODE_UPDATE_CREDITS);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4065) if (IS_ERR(handle)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4066) ret = PTR_ERR(handle);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4067) mlog_errno(ret);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4068) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4069) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4070)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4071) ret = ocfs2_journal_access_di(handle, INODE_CACHE(t_inode), t_bh,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4072) OCFS2_JOURNAL_ACCESS_WRITE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4073) if (ret) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4074) mlog_errno(ret);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4075) goto out_commit;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4076) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4077)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4078) spin_lock(&OCFS2_I(t_inode)->ip_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4079) OCFS2_I(t_inode)->ip_clusters = OCFS2_I(s_inode)->ip_clusters;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4080) OCFS2_I(t_inode)->ip_attr = OCFS2_I(s_inode)->ip_attr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4081) OCFS2_I(t_inode)->ip_dyn_features = OCFS2_I(s_inode)->ip_dyn_features;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4082) spin_unlock(&OCFS2_I(t_inode)->ip_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4083) i_size_write(t_inode, size);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4084) t_inode->i_blocks = s_inode->i_blocks;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4085)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4086) di->i_xattr_inline_size = s_di->i_xattr_inline_size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4087) di->i_clusters = s_di->i_clusters;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4088) di->i_size = s_di->i_size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4089) di->i_dyn_features = s_di->i_dyn_features;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4090) di->i_attr = s_di->i_attr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4091)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4092) if (preserve) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4093) t_inode->i_uid = s_inode->i_uid;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4094) t_inode->i_gid = s_inode->i_gid;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4095) t_inode->i_mode = s_inode->i_mode;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4096) di->i_uid = s_di->i_uid;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4097) di->i_gid = s_di->i_gid;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4098) di->i_mode = s_di->i_mode;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4099)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4100) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4101) * update time.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4102) * we want mtime to appear identical to the source and
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4103) * update ctime.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4104) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4105) t_inode->i_ctime = current_time(t_inode);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4106)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4107) di->i_ctime = cpu_to_le64(t_inode->i_ctime.tv_sec);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4108) di->i_ctime_nsec = cpu_to_le32(t_inode->i_ctime.tv_nsec);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4109)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4110) t_inode->i_mtime = s_inode->i_mtime;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4111) di->i_mtime = s_di->i_mtime;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4112) di->i_mtime_nsec = s_di->i_mtime_nsec;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4113) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4114)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4115) ocfs2_journal_dirty(handle, t_bh);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4116)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4117) out_commit:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4118) ocfs2_commit_trans(OCFS2_SB(t_inode->i_sb), handle);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4119) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4120) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4121)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4122) static int ocfs2_create_reflink_node(struct inode *s_inode,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4123) struct buffer_head *s_bh,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4124) struct inode *t_inode,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4125) struct buffer_head *t_bh,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4126) bool preserve)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4127) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4128) int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4129) struct buffer_head *ref_root_bh = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4130) struct ocfs2_cached_dealloc_ctxt dealloc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4131) struct ocfs2_super *osb = OCFS2_SB(s_inode->i_sb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4132) struct ocfs2_dinode *di = (struct ocfs2_dinode *)s_bh->b_data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4133) struct ocfs2_refcount_tree *ref_tree;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4134)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4135) ocfs2_init_dealloc_ctxt(&dealloc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4136)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4137) ret = ocfs2_set_refcount_tree(t_inode, t_bh,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4138) le64_to_cpu(di->i_refcount_loc));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4139) if (ret) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4140) mlog_errno(ret);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4141) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4142) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4143)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4144) if (OCFS2_I(s_inode)->ip_dyn_features & OCFS2_INLINE_DATA_FL) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4145) ret = ocfs2_duplicate_inline_data(s_inode, s_bh,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4146) t_inode, t_bh);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4147) if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4148) mlog_errno(ret);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4149) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4150) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4151)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4152) ret = ocfs2_lock_refcount_tree(osb, le64_to_cpu(di->i_refcount_loc),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4153) 1, &ref_tree, &ref_root_bh);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4154) if (ret) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4155) mlog_errno(ret);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4156) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4157) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4158)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4159) ret = ocfs2_duplicate_extent_list(s_inode, t_inode, t_bh,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4160) &ref_tree->rf_ci, ref_root_bh,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4161) &dealloc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4162) if (ret) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4163) mlog_errno(ret);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4164) goto out_unlock_refcount;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4165) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4166)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4167) out_unlock_refcount:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4168) ocfs2_unlock_refcount_tree(osb, ref_tree, 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4169) brelse(ref_root_bh);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4170) out:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4171) if (ocfs2_dealloc_has_cluster(&dealloc)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4172) ocfs2_schedule_truncate_log_flush(osb, 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4173) ocfs2_run_deallocs(osb, &dealloc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4174) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4175)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4176) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4177) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4178)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4179) static int __ocfs2_reflink(struct dentry *old_dentry,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4180) struct buffer_head *old_bh,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4181) struct inode *new_inode,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4182) bool preserve)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4183) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4184) int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4185) struct inode *inode = d_inode(old_dentry);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4186) struct buffer_head *new_bh = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4187)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4188) if (OCFS2_I(inode)->ip_flags & OCFS2_INODE_SYSTEM_FILE) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4189) ret = -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4190) mlog_errno(ret);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4191) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4192) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4193)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4194) ret = filemap_fdatawrite(inode->i_mapping);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4195) if (ret) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4196) mlog_errno(ret);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4197) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4198) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4199)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4200) ret = ocfs2_attach_refcount_tree(inode, old_bh);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4201) if (ret) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4202) mlog_errno(ret);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4203) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4204) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4205)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4206) inode_lock_nested(new_inode, I_MUTEX_CHILD);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4207) ret = ocfs2_inode_lock_nested(new_inode, &new_bh, 1,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4208) OI_LS_REFLINK_TARGET);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4209) if (ret) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4210) mlog_errno(ret);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4211) goto out_unlock;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4212) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4213)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4214) ret = ocfs2_create_reflink_node(inode, old_bh,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4215) new_inode, new_bh, preserve);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4216) if (ret) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4217) mlog_errno(ret);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4218) goto inode_unlock;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4219) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4220)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4221) if (OCFS2_I(inode)->ip_dyn_features & OCFS2_HAS_XATTR_FL) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4222) ret = ocfs2_reflink_xattrs(inode, old_bh,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4223) new_inode, new_bh,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4224) preserve);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4225) if (ret) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4226) mlog_errno(ret);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4227) goto inode_unlock;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4228) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4229) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4230)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4231) ret = ocfs2_complete_reflink(inode, old_bh,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4232) new_inode, new_bh, preserve);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4233) if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4234) mlog_errno(ret);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4235)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4236) inode_unlock:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4237) ocfs2_inode_unlock(new_inode, 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4238) brelse(new_bh);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4239) out_unlock:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4240) inode_unlock(new_inode);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4241) out:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4242) if (!ret) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4243) ret = filemap_fdatawait(inode->i_mapping);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4244) if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4245) mlog_errno(ret);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4246) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4247) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4248) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4249)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4250) static int ocfs2_reflink(struct dentry *old_dentry, struct inode *dir,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4251) struct dentry *new_dentry, bool preserve)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4252) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4253) int error, had_lock;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4254) struct inode *inode = d_inode(old_dentry);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4255) struct buffer_head *old_bh = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4256) struct inode *new_orphan_inode = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4257) struct ocfs2_lock_holder oh;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4258)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4259) if (!ocfs2_refcount_tree(OCFS2_SB(inode->i_sb)))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4260) return -EOPNOTSUPP;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4261)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4262)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4263) error = ocfs2_create_inode_in_orphan(dir, inode->i_mode,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4264) &new_orphan_inode);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4265) if (error) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4266) mlog_errno(error);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4267) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4268) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4269)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4270) error = ocfs2_rw_lock(inode, 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4271) if (error) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4272) mlog_errno(error);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4273) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4274) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4275)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4276) error = ocfs2_inode_lock(inode, &old_bh, 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4277) if (error) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4278) mlog_errno(error);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4279) ocfs2_rw_unlock(inode, 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4280) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4281) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4282)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4283) down_write(&OCFS2_I(inode)->ip_xattr_sem);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4284) down_write(&OCFS2_I(inode)->ip_alloc_sem);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4285) error = __ocfs2_reflink(old_dentry, old_bh,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4286) new_orphan_inode, preserve);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4287) up_write(&OCFS2_I(inode)->ip_alloc_sem);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4288) up_write(&OCFS2_I(inode)->ip_xattr_sem);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4289)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4290) ocfs2_inode_unlock(inode, 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4291) ocfs2_rw_unlock(inode, 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4292) brelse(old_bh);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4293)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4294) if (error) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4295) mlog_errno(error);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4296) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4297) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4298)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4299) had_lock = ocfs2_inode_lock_tracker(new_orphan_inode, NULL, 1,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4300) &oh);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4301) if (had_lock < 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4302) error = had_lock;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4303) mlog_errno(error);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4304) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4305) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4306)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4307) /* If the security isn't preserved, we need to re-initialize them. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4308) if (!preserve) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4309) error = ocfs2_init_security_and_acl(dir, new_orphan_inode,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4310) &new_dentry->d_name);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4311) if (error)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4312) mlog_errno(error);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4313) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4314) if (!error) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4315) error = ocfs2_mv_orphaned_inode_to_new(dir, new_orphan_inode,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4316) new_dentry);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4317) if (error)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4318) mlog_errno(error);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4319) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4320) ocfs2_inode_unlock_tracker(new_orphan_inode, 1, &oh, had_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4321)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4322) out:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4323) if (new_orphan_inode) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4324) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4325) * We need to open_unlock the inode no matter whether we
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4326) * succeed or not, so that other nodes can delete it later.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4327) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4328) ocfs2_open_unlock(new_orphan_inode);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4329) if (error)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4330) iput(new_orphan_inode);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4331) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4332)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4333) return error;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4334) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4335)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4336) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4337) * Below here are the bits used by OCFS2_IOC_REFLINK() to fake
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4338) * sys_reflink(). This will go away when vfs_reflink() exists in
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4339) * fs/namei.c.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4340) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4341)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4342) /* copied from may_create in VFS. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4343) static inline int ocfs2_may_create(struct inode *dir, struct dentry *child)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4344) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4345) if (d_really_is_positive(child))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4346) return -EEXIST;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4347) if (IS_DEADDIR(dir))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4348) return -ENOENT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4349) return inode_permission(dir, MAY_WRITE | MAY_EXEC);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4350) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4351)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4352) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4353) * ocfs2_vfs_reflink - Create a reference-counted link
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4354) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4355) * @old_dentry: source dentry + inode
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4356) * @dir: directory to create the target
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4357) * @new_dentry: target dentry
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4358) * @preserve: if true, preserve all file attributes
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4359) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4360) static int ocfs2_vfs_reflink(struct dentry *old_dentry, struct inode *dir,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4361) struct dentry *new_dentry, bool preserve)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4362) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4363) struct inode *inode = d_inode(old_dentry);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4364) int error;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4365)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4366) if (!inode)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4367) return -ENOENT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4368)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4369) error = ocfs2_may_create(dir, new_dentry);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4370) if (error)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4371) return error;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4372)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4373) if (dir->i_sb != inode->i_sb)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4374) return -EXDEV;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4375)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4376) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4377) * A reflink to an append-only or immutable file cannot be created.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4378) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4379) if (IS_APPEND(inode) || IS_IMMUTABLE(inode))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4380) return -EPERM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4381)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4382) /* Only regular files can be reflinked. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4383) if (!S_ISREG(inode->i_mode))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4384) return -EPERM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4385)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4386) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4387) * If the caller wants to preserve ownership, they require the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4388) * rights to do so.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4389) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4390) if (preserve) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4391) if (!uid_eq(current_fsuid(), inode->i_uid) && !capable(CAP_CHOWN))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4392) return -EPERM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4393) if (!in_group_p(inode->i_gid) && !capable(CAP_CHOWN))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4394) return -EPERM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4395) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4396)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4397) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4398) * If the caller is modifying any aspect of the attributes, they
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4399) * are not creating a snapshot. They need read permission on the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4400) * file.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4401) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4402) if (!preserve) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4403) error = inode_permission(inode, MAY_READ);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4404) if (error)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4405) return error;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4406) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4407)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4408) inode_lock(inode);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4409) error = dquot_initialize(dir);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4410) if (!error)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4411) error = ocfs2_reflink(old_dentry, dir, new_dentry, preserve);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4412) inode_unlock(inode);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4413) if (!error)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4414) fsnotify_create(dir, new_dentry);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4415) return error;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4416) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4417) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4418) * Most codes are copied from sys_linkat.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4419) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4420) int ocfs2_reflink_ioctl(struct inode *inode,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4421) const char __user *oldname,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4422) const char __user *newname,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4423) bool preserve)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4424) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4425) struct dentry *new_dentry;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4426) struct path old_path, new_path;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4427) int error;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4428)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4429) if (!ocfs2_refcount_tree(OCFS2_SB(inode->i_sb)))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4430) return -EOPNOTSUPP;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4431)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4432) error = user_path_at(AT_FDCWD, oldname, 0, &old_path);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4433) if (error) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4434) mlog_errno(error);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4435) return error;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4436) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4437)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4438) new_dentry = user_path_create(AT_FDCWD, newname, &new_path, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4439) error = PTR_ERR(new_dentry);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4440) if (IS_ERR(new_dentry)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4441) mlog_errno(error);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4442) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4443) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4444)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4445) error = -EXDEV;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4446) if (old_path.mnt != new_path.mnt) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4447) mlog_errno(error);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4448) goto out_dput;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4449) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4450)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4451) error = ocfs2_vfs_reflink(old_path.dentry,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4452) d_inode(new_path.dentry),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4453) new_dentry, preserve);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4454) out_dput:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4455) done_path_create(&new_path, new_dentry);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4456) out:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4457) path_put(&old_path);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4458)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4459) return error;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4460) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4461)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4462) /* Update destination inode size, if necessary. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4463) int ocfs2_reflink_update_dest(struct inode *dest,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4464) struct buffer_head *d_bh,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4465) loff_t newlen)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4466) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4467) handle_t *handle;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4468) int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4469)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4470) dest->i_blocks = ocfs2_inode_sector_count(dest);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4471)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4472) if (newlen <= i_size_read(dest))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4473) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4474)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4475) handle = ocfs2_start_trans(OCFS2_SB(dest->i_sb),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4476) OCFS2_INODE_UPDATE_CREDITS);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4477) if (IS_ERR(handle)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4478) ret = PTR_ERR(handle);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4479) mlog_errno(ret);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4480) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4481) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4482)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4483) /* Extend i_size if needed. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4484) spin_lock(&OCFS2_I(dest)->ip_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4485) if (newlen > i_size_read(dest))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4486) i_size_write(dest, newlen);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4487) spin_unlock(&OCFS2_I(dest)->ip_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4488) dest->i_ctime = dest->i_mtime = current_time(dest);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4489)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4490) ret = ocfs2_mark_inode_dirty(handle, dest, d_bh);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4491) if (ret) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4492) mlog_errno(ret);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4493) goto out_commit;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4494) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4495)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4496) out_commit:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4497) ocfs2_commit_trans(OCFS2_SB(dest->i_sb), handle);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4498) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4499) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4500)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4501) /* Remap the range pos_in:len in s_inode to pos_out:len in t_inode. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4502) static loff_t ocfs2_reflink_remap_extent(struct inode *s_inode,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4503) struct buffer_head *s_bh,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4504) loff_t pos_in,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4505) struct inode *t_inode,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4506) struct buffer_head *t_bh,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4507) loff_t pos_out,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4508) loff_t len,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4509) struct ocfs2_cached_dealloc_ctxt *dealloc)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4510) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4511) struct ocfs2_extent_tree s_et;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4512) struct ocfs2_extent_tree t_et;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4513) struct ocfs2_dinode *dis;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4514) struct buffer_head *ref_root_bh = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4515) struct ocfs2_refcount_tree *ref_tree;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4516) struct ocfs2_super *osb;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4517) loff_t remapped_bytes = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4518) loff_t pstart, plen;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4519) u32 p_cluster, num_clusters, slast, spos, tpos, remapped_clus = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4520) unsigned int ext_flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4521) int ret = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4522)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4523) osb = OCFS2_SB(s_inode->i_sb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4524) dis = (struct ocfs2_dinode *)s_bh->b_data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4525) ocfs2_init_dinode_extent_tree(&s_et, INODE_CACHE(s_inode), s_bh);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4526) ocfs2_init_dinode_extent_tree(&t_et, INODE_CACHE(t_inode), t_bh);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4527)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4528) spos = ocfs2_bytes_to_clusters(s_inode->i_sb, pos_in);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4529) tpos = ocfs2_bytes_to_clusters(t_inode->i_sb, pos_out);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4530) slast = ocfs2_clusters_for_bytes(s_inode->i_sb, pos_in + len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4531)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4532) while (spos < slast) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4533) if (fatal_signal_pending(current)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4534) ret = -EINTR;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4535) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4536) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4537)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4538) /* Look up the extent. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4539) ret = ocfs2_get_clusters(s_inode, spos, &p_cluster,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4540) &num_clusters, &ext_flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4541) if (ret) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4542) mlog_errno(ret);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4543) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4544) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4545)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4546) num_clusters = min_t(u32, num_clusters, slast - spos);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4547)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4548) /* Punch out the dest range. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4549) pstart = ocfs2_clusters_to_bytes(t_inode->i_sb, tpos);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4550) plen = ocfs2_clusters_to_bytes(t_inode->i_sb, num_clusters);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4551) ret = ocfs2_remove_inode_range(t_inode, t_bh, pstart, plen);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4552) if (ret) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4553) mlog_errno(ret);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4554) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4555) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4556)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4557) if (p_cluster == 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4558) goto next_loop;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4559)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4560) /* Lock the refcount btree... */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4561) ret = ocfs2_lock_refcount_tree(osb,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4562) le64_to_cpu(dis->i_refcount_loc),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4563) 1, &ref_tree, &ref_root_bh);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4564) if (ret) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4565) mlog_errno(ret);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4566) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4567) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4568)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4569) /* Mark s_inode's extent as refcounted. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4570) if (!(ext_flags & OCFS2_EXT_REFCOUNTED)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4571) ret = ocfs2_add_refcount_flag(s_inode, &s_et,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4572) &ref_tree->rf_ci,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4573) ref_root_bh, spos,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4574) p_cluster, num_clusters,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4575) dealloc, NULL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4576) if (ret) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4577) mlog_errno(ret);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4578) goto out_unlock_refcount;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4579) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4580) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4581)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4582) /* Map in the new extent. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4583) ext_flags |= OCFS2_EXT_REFCOUNTED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4584) ret = ocfs2_add_refcounted_extent(t_inode, &t_et,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4585) &ref_tree->rf_ci,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4586) ref_root_bh,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4587) tpos, p_cluster,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4588) num_clusters,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4589) ext_flags,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4590) dealloc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4591) if (ret) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4592) mlog_errno(ret);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4593) goto out_unlock_refcount;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4594) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4595)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4596) ocfs2_unlock_refcount_tree(osb, ref_tree, 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4597) brelse(ref_root_bh);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4598) next_loop:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4599) spos += num_clusters;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4600) tpos += num_clusters;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4601) remapped_clus += num_clusters;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4602) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4603)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4604) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4605) out_unlock_refcount:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4606) ocfs2_unlock_refcount_tree(osb, ref_tree, 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4607) brelse(ref_root_bh);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4608) out:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4609) remapped_bytes = ocfs2_clusters_to_bytes(t_inode->i_sb, remapped_clus);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4610) remapped_bytes = min_t(loff_t, len, remapped_bytes);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4611)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4612) return remapped_bytes > 0 ? remapped_bytes : ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4613) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4614)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4615) /* Set up refcount tree and remap s_inode to t_inode. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4616) loff_t ocfs2_reflink_remap_blocks(struct inode *s_inode,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4617) struct buffer_head *s_bh,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4618) loff_t pos_in,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4619) struct inode *t_inode,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4620) struct buffer_head *t_bh,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4621) loff_t pos_out,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4622) loff_t len)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4623) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4624) struct ocfs2_cached_dealloc_ctxt dealloc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4625) struct ocfs2_super *osb;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4626) struct ocfs2_dinode *dis;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4627) struct ocfs2_dinode *dit;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4628) loff_t ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4629)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4630) osb = OCFS2_SB(s_inode->i_sb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4631) dis = (struct ocfs2_dinode *)s_bh->b_data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4632) dit = (struct ocfs2_dinode *)t_bh->b_data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4633) ocfs2_init_dealloc_ctxt(&dealloc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4634)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4635) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4636) * If we're reflinking the entire file and the source is inline
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4637) * data, just copy the contents.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4638) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4639) if (pos_in == pos_out && pos_in == 0 && len == i_size_read(s_inode) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4640) i_size_read(t_inode) <= len &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4641) (OCFS2_I(s_inode)->ip_dyn_features & OCFS2_INLINE_DATA_FL)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4642) ret = ocfs2_duplicate_inline_data(s_inode, s_bh, t_inode, t_bh);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4643) if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4644) mlog_errno(ret);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4645) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4646) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4647)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4648) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4649) * If both inodes belong to two different refcount groups then
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4650) * forget it because we don't know how (or want) to go merging
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4651) * refcount trees.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4652) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4653) ret = -EOPNOTSUPP;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4654) if (ocfs2_is_refcount_inode(s_inode) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4655) ocfs2_is_refcount_inode(t_inode) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4656) le64_to_cpu(dis->i_refcount_loc) !=
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4657) le64_to_cpu(dit->i_refcount_loc))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4658) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4659)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4660) /* Neither inode has a refcount tree. Add one to s_inode. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4661) if (!ocfs2_is_refcount_inode(s_inode) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4662) !ocfs2_is_refcount_inode(t_inode)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4663) ret = ocfs2_create_refcount_tree(s_inode, s_bh);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4664) if (ret) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4665) mlog_errno(ret);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4666) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4667) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4668) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4669)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4670) /* Ensure that both inodes end up with the same refcount tree. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4671) if (!ocfs2_is_refcount_inode(s_inode)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4672) ret = ocfs2_set_refcount_tree(s_inode, s_bh,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4673) le64_to_cpu(dit->i_refcount_loc));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4674) if (ret) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4675) mlog_errno(ret);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4676) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4677) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4678) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4679) if (!ocfs2_is_refcount_inode(t_inode)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4680) ret = ocfs2_set_refcount_tree(t_inode, t_bh,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4681) le64_to_cpu(dis->i_refcount_loc));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4682) if (ret) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4683) mlog_errno(ret);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4684) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4685) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4686) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4687)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4688) /* Turn off inline data in the dest file. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4689) if (OCFS2_I(t_inode)->ip_dyn_features & OCFS2_INLINE_DATA_FL) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4690) ret = ocfs2_convert_inline_data_to_extents(t_inode, t_bh);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4691) if (ret) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4692) mlog_errno(ret);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4693) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4694) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4695) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4696)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4697) /* Actually remap extents now. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4698) ret = ocfs2_reflink_remap_extent(s_inode, s_bh, pos_in, t_inode, t_bh,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4699) pos_out, len, &dealloc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4700) if (ret < 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4701) mlog_errno(ret);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4702) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4703) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4704)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4705) out:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4706) if (ocfs2_dealloc_has_cluster(&dealloc)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4707) ocfs2_schedule_truncate_log_flush(osb, 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4708) ocfs2_run_deallocs(osb, &dealloc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4709) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4710)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4711) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4712) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4713)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4714) /* Lock an inode and grab a bh pointing to the inode. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4715) int ocfs2_reflink_inodes_lock(struct inode *s_inode,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4716) struct buffer_head **bh_s,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4717) struct inode *t_inode,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4718) struct buffer_head **bh_t)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4719) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4720) struct inode *inode1 = s_inode;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4721) struct inode *inode2 = t_inode;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4722) struct ocfs2_inode_info *oi1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4723) struct ocfs2_inode_info *oi2;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4724) struct buffer_head *bh1 = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4725) struct buffer_head *bh2 = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4726) bool same_inode = (s_inode == t_inode);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4727) bool need_swap = (inode1->i_ino > inode2->i_ino);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4728) int status;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4729)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4730) /* First grab the VFS and rw locks. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4731) lock_two_nondirectories(s_inode, t_inode);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4732) if (need_swap)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4733) swap(inode1, inode2);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4734)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4735) status = ocfs2_rw_lock(inode1, 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4736) if (status) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4737) mlog_errno(status);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4738) goto out_i1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4739) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4740) if (!same_inode) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4741) status = ocfs2_rw_lock(inode2, 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4742) if (status) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4743) mlog_errno(status);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4744) goto out_i2;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4745) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4746) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4747)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4748) /* Now go for the cluster locks */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4749) oi1 = OCFS2_I(inode1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4750) oi2 = OCFS2_I(inode2);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4751)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4752) trace_ocfs2_double_lock((unsigned long long)oi1->ip_blkno,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4753) (unsigned long long)oi2->ip_blkno);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4754)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4755) /* We always want to lock the one with the lower lockid first. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4756) if (oi1->ip_blkno > oi2->ip_blkno)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4757) mlog_errno(-ENOLCK);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4758)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4759) /* lock id1 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4760) status = ocfs2_inode_lock_nested(inode1, &bh1, 1,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4761) OI_LS_REFLINK_TARGET);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4762) if (status < 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4763) if (status != -ENOENT)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4764) mlog_errno(status);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4765) goto out_rw2;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4766) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4767)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4768) /* lock id2 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4769) if (!same_inode) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4770) status = ocfs2_inode_lock_nested(inode2, &bh2, 1,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4771) OI_LS_REFLINK_TARGET);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4772) if (status < 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4773) if (status != -ENOENT)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4774) mlog_errno(status);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4775) goto out_cl1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4776) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4777) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4778) bh2 = bh1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4779) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4780)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4781) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4782) * If we swapped inode order above, we have to swap the buffer heads
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4783) * before passing them back to the caller.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4784) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4785) if (need_swap)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4786) swap(bh1, bh2);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4787) *bh_s = bh1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4788) *bh_t = bh2;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4789)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4790) trace_ocfs2_double_lock_end(
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4791) (unsigned long long)oi1->ip_blkno,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4792) (unsigned long long)oi2->ip_blkno);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4793)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4794) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4795)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4796) out_cl1:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4797) ocfs2_inode_unlock(inode1, 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4798) brelse(bh1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4799) out_rw2:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4800) ocfs2_rw_unlock(inode2, 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4801) out_i2:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4802) ocfs2_rw_unlock(inode1, 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4803) out_i1:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4804) unlock_two_nondirectories(s_inode, t_inode);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4805) return status;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4806) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4807)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4808) /* Unlock both inodes and release buffers. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4809) void ocfs2_reflink_inodes_unlock(struct inode *s_inode,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4810) struct buffer_head *s_bh,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4811) struct inode *t_inode,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4812) struct buffer_head *t_bh)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4813) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4814) ocfs2_inode_unlock(s_inode, 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4815) ocfs2_rw_unlock(s_inode, 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4816) brelse(s_bh);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4817) if (s_inode != t_inode) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4818) ocfs2_inode_unlock(t_inode, 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4819) ocfs2_rw_unlock(t_inode, 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4820) brelse(t_bh);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4821) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4822) unlock_two_nondirectories(s_inode, t_inode);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4823) }