// SPDX-License-Identifier: GPL-2.0-or-later
/* -*- mode: c; c-basic-offset: 8; -*-
 * vim: noexpandtab sw=8 ts=8 sts=0:
 *
 * alloc.c
 *
 * Extent allocs and frees
 *
 * Copyright (C) 2002, 2004 Oracle. All rights reserved.
 */

#include <linux/fs.h>
#include <linux/types.h>
#include <linux/slab.h>
#include <linux/highmem.h>
#include <linux/swap.h>
#include <linux/quotaops.h>
#include <linux/blkdev.h>
#include <linux/sched/signal.h>

#include <cluster/masklog.h>

#include "ocfs2.h"

#include "alloc.h"
#include "aops.h"
#include "blockcheck.h"
#include "dlmglue.h"
#include "extent_map.h"
#include "inode.h"
#include "journal.h"
#include "localalloc.h"
#include "suballoc.h"
#include "sysfile.h"
#include "file.h"
#include "super.h"
#include "uptodate.h"
#include "xattr.h"
#include "refcounttree.h"
#include "ocfs2_trace.h"

#include "buffer_head_io.h"

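/*
 * Describes how an extent record being inserted relates to an existing
 * record: not contiguous at all, contiguous with the existing record on
 * its left, on its right, or on both sides.
 */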
enum ocfs2_contig_type {
        CONTIG_NONE = 0,
        CONTIG_LEFT,
        CONTIG_RIGHT,
        CONTIG_LEFTRIGHT,
};

static enum ocfs2_contig_type
        ocfs2_extent_rec_contig(struct super_block *sb,
                                struct ocfs2_extent_rec *ext,
                                struct ocfs2_extent_rec *insert_rec);
/*
 * Operations for a specific extent tree type.
 *
 * To implement an on-disk btree (extent tree) type in ocfs2, add
 * an ocfs2_extent_tree_operations structure and the matching
 * ocfs2_init_<thingy>_extent_tree() function. That's pretty much it
 * for the allocation portion of the extent tree.
 */
struct ocfs2_extent_tree_operations {
        /*
         * last_eb_blk is the block number of the rightmost leaf extent
         * block. Most on-disk structures containing an extent tree store
         * this value for fast access. The ->eo_set_last_eb_blk() and
         * ->eo_get_last_eb_blk() operations access this value. They are
         * both required.
         */
        void (*eo_set_last_eb_blk)(struct ocfs2_extent_tree *et,
                                   u64 blkno);
        u64 (*eo_get_last_eb_blk)(struct ocfs2_extent_tree *et);

        /*
         * The on-disk structure usually keeps track of how many total
         * clusters are stored in this extent tree. This function updates
         * that value. new_clusters is the delta, and must be
         * added to the total. Required.
         */
        void (*eo_update_clusters)(struct ocfs2_extent_tree *et,
                                   u32 new_clusters);

        /*
         * If this extent tree is supported by an extent map, insert
         * a record into the map.
         */
        void (*eo_extent_map_insert)(struct ocfs2_extent_tree *et,
                                     struct ocfs2_extent_rec *rec);

        /*
         * If this extent tree is supported by an extent map, truncate the
         * map to clusters.
         */
        void (*eo_extent_map_truncate)(struct ocfs2_extent_tree *et,
                                       u32 clusters);

        /*
         * If ->eo_insert_check() exists, it is called before rec is
         * inserted into the extent tree. It is optional.
         */
        int (*eo_insert_check)(struct ocfs2_extent_tree *et,
                               struct ocfs2_extent_rec *rec);
        int (*eo_sanity_check)(struct ocfs2_extent_tree *et);

        /*
         * --------------------------------------------------------------
         * The remaining are internal to ocfs2_extent_tree and don't have
         * accessor functions
         */

        /*
         * ->eo_fill_root_el() takes et->et_object and sets et->et_root_el.
         * It is required.
         */
        void (*eo_fill_root_el)(struct ocfs2_extent_tree *et);

        /*
         * ->eo_fill_max_leaf_clusters sets et->et_max_leaf_clusters if
         * it exists. If it does not, et->et_max_leaf_clusters is set
         * to 0 (unlimited). Optional.
         */
        void (*eo_fill_max_leaf_clusters)(struct ocfs2_extent_tree *et);

        /*
         * ->eo_extent_contig tests whether the two ocfs2_extent_recs
         * are contiguous or not. Optional. It does not need to be set
         * if ocfs2_extent_rec is used as the tree leaf.
         */
        enum ocfs2_contig_type
                (*eo_extent_contig)(struct ocfs2_extent_tree *et,
                                    struct ocfs2_extent_rec *ext,
                                    struct ocfs2_extent_rec *insert_rec);
};


/*
 * Pre-declare ocfs2_dinode_et_ops so we can use it as a sanity check
 * in the methods.
 */
static u64 ocfs2_dinode_get_last_eb_blk(struct ocfs2_extent_tree *et);
static void ocfs2_dinode_set_last_eb_blk(struct ocfs2_extent_tree *et,
                                         u64 blkno);
static void ocfs2_dinode_update_clusters(struct ocfs2_extent_tree *et,
                                         u32 clusters);
static void ocfs2_dinode_extent_map_insert(struct ocfs2_extent_tree *et,
                                           struct ocfs2_extent_rec *rec);
static void ocfs2_dinode_extent_map_truncate(struct ocfs2_extent_tree *et,
                                             u32 clusters);
static int ocfs2_dinode_insert_check(struct ocfs2_extent_tree *et,
                                     struct ocfs2_extent_rec *rec);
static int ocfs2_dinode_sanity_check(struct ocfs2_extent_tree *et);
static void ocfs2_dinode_fill_root_el(struct ocfs2_extent_tree *et);

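/*
 * Forward declarations for helpers, defined later in this file, that
 * hand back extent blocks held in the tree's dealloc context
 * (et->et_dealloc) when new metadata blocks are wanted.
 */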
static int ocfs2_reuse_blk_from_dealloc(handle_t *handle,
                                        struct ocfs2_extent_tree *et,
                                        struct buffer_head **new_eb_bh,
                                        int blk_wanted, int *blk_given);
static int ocfs2_is_dealloc_empty(struct ocfs2_extent_tree *et);

static const struct ocfs2_extent_tree_operations ocfs2_dinode_et_ops = {
        .eo_set_last_eb_blk = ocfs2_dinode_set_last_eb_blk,
        .eo_get_last_eb_blk = ocfs2_dinode_get_last_eb_blk,
        .eo_update_clusters = ocfs2_dinode_update_clusters,
        .eo_extent_map_insert = ocfs2_dinode_extent_map_insert,
        .eo_extent_map_truncate = ocfs2_dinode_extent_map_truncate,
        .eo_insert_check = ocfs2_dinode_insert_check,
        .eo_sanity_check = ocfs2_dinode_sanity_check,
        .eo_fill_root_el = ocfs2_dinode_fill_root_el,
};

static void ocfs2_dinode_set_last_eb_blk(struct ocfs2_extent_tree *et,
                                         u64 blkno)
{
        struct ocfs2_dinode *di = et->et_object;

        BUG_ON(et->et_ops != &ocfs2_dinode_et_ops);
        di->i_last_eb_blk = cpu_to_le64(blkno);
}

static u64 ocfs2_dinode_get_last_eb_blk(struct ocfs2_extent_tree *et)
{
        struct ocfs2_dinode *di = et->et_object;

        BUG_ON(et->et_ops != &ocfs2_dinode_et_ops);
        return le64_to_cpu(di->i_last_eb_blk);
}

static void ocfs2_dinode_update_clusters(struct ocfs2_extent_tree *et,
                                         u32 clusters)
{
        struct ocfs2_inode_info *oi = cache_info_to_inode(et->et_ci);
        struct ocfs2_dinode *di = et->et_object;

        le32_add_cpu(&di->i_clusters, clusters);
        spin_lock(&oi->ip_lock);
        oi->ip_clusters = le32_to_cpu(di->i_clusters);
        spin_unlock(&oi->ip_lock);
}

static void ocfs2_dinode_extent_map_insert(struct ocfs2_extent_tree *et,
                                           struct ocfs2_extent_rec *rec)
{
        struct inode *inode = &cache_info_to_inode(et->et_ci)->vfs_inode;

        ocfs2_extent_map_insert_rec(inode, rec);
}

static void ocfs2_dinode_extent_map_truncate(struct ocfs2_extent_tree *et,
                                             u32 clusters)
{
        struct inode *inode = &cache_info_to_inode(et->et_ci)->vfs_inode;

        ocfs2_extent_map_trunc(inode, clusters);
}

static int ocfs2_dinode_insert_check(struct ocfs2_extent_tree *et,
                                     struct ocfs2_extent_rec *rec)
{
        struct ocfs2_inode_info *oi = cache_info_to_inode(et->et_ci);
        struct ocfs2_super *osb = OCFS2_SB(oi->vfs_inode.i_sb);

        BUG_ON(oi->ip_dyn_features & OCFS2_INLINE_DATA_FL);
        mlog_bug_on_msg(!ocfs2_sparse_alloc(osb) &&
                        (oi->ip_clusters != le32_to_cpu(rec->e_cpos)),
                        "Device %s, asking for sparse allocation: inode %llu, "
                        "cpos %u, clusters %u\n",
                        osb->dev_str,
                        (unsigned long long)oi->ip_blkno,
                        rec->e_cpos, oi->ip_clusters);

        return 0;
}

static int ocfs2_dinode_sanity_check(struct ocfs2_extent_tree *et)
{
        struct ocfs2_dinode *di = et->et_object;

        BUG_ON(et->et_ops != &ocfs2_dinode_et_ops);
        BUG_ON(!OCFS2_IS_VALID_DINODE(di));

        return 0;
}

static void ocfs2_dinode_fill_root_el(struct ocfs2_extent_tree *et)
{
        struct ocfs2_dinode *di = et->et_object;

        et->et_root_el = &di->id2.i_list;
}


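/*
 * Extent tree operations for an xattr value tree. The tree root lives
 * inside the ocfs2_xattr_value_root wrapped by the ocfs2_xattr_value_buf
 * that et->et_object points at.
 */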
static void ocfs2_xattr_value_fill_root_el(struct ocfs2_extent_tree *et)
{
        struct ocfs2_xattr_value_buf *vb = et->et_object;

        et->et_root_el = &vb->vb_xv->xr_list;
}

static void ocfs2_xattr_value_set_last_eb_blk(struct ocfs2_extent_tree *et,
                                              u64 blkno)
{
        struct ocfs2_xattr_value_buf *vb = et->et_object;

        vb->vb_xv->xr_last_eb_blk = cpu_to_le64(blkno);
}

static u64 ocfs2_xattr_value_get_last_eb_blk(struct ocfs2_extent_tree *et)
{
        struct ocfs2_xattr_value_buf *vb = et->et_object;

        return le64_to_cpu(vb->vb_xv->xr_last_eb_blk);
}

static void ocfs2_xattr_value_update_clusters(struct ocfs2_extent_tree *et,
                                              u32 clusters)
{
        struct ocfs2_xattr_value_buf *vb = et->et_object;

        le32_add_cpu(&vb->vb_xv->xr_clusters, clusters);
}

static const struct ocfs2_extent_tree_operations ocfs2_xattr_value_et_ops = {
        .eo_set_last_eb_blk = ocfs2_xattr_value_set_last_eb_blk,
        .eo_get_last_eb_blk = ocfs2_xattr_value_get_last_eb_blk,
        .eo_update_clusters = ocfs2_xattr_value_update_clusters,
        .eo_fill_root_el = ocfs2_xattr_value_fill_root_el,
};

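/*
 * Extent tree operations for an xattr index tree, rooted in the xb_root
 * of an ocfs2_xattr_block. Leaf extents are capped at
 * OCFS2_MAX_XATTR_TREE_LEAF_SIZE via ->eo_fill_max_leaf_clusters.
 */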
static void ocfs2_xattr_tree_fill_root_el(struct ocfs2_extent_tree *et)
{
        struct ocfs2_xattr_block *xb = et->et_object;

        et->et_root_el = &xb->xb_attrs.xb_root.xt_list;
}

static void ocfs2_xattr_tree_fill_max_leaf_clusters(struct ocfs2_extent_tree *et)
{
        struct super_block *sb = ocfs2_metadata_cache_get_super(et->et_ci);
        et->et_max_leaf_clusters =
                ocfs2_clusters_for_bytes(sb, OCFS2_MAX_XATTR_TREE_LEAF_SIZE);
}

static void ocfs2_xattr_tree_set_last_eb_blk(struct ocfs2_extent_tree *et,
                                             u64 blkno)
{
        struct ocfs2_xattr_block *xb = et->et_object;
        struct ocfs2_xattr_tree_root *xt = &xb->xb_attrs.xb_root;

        xt->xt_last_eb_blk = cpu_to_le64(blkno);
}

static u64 ocfs2_xattr_tree_get_last_eb_blk(struct ocfs2_extent_tree *et)
{
        struct ocfs2_xattr_block *xb = et->et_object;
        struct ocfs2_xattr_tree_root *xt = &xb->xb_attrs.xb_root;

        return le64_to_cpu(xt->xt_last_eb_blk);
}

static void ocfs2_xattr_tree_update_clusters(struct ocfs2_extent_tree *et,
                                             u32 clusters)
{
        struct ocfs2_xattr_block *xb = et->et_object;

        le32_add_cpu(&xb->xb_attrs.xb_root.xt_clusters, clusters);
}

static const struct ocfs2_extent_tree_operations ocfs2_xattr_tree_et_ops = {
        .eo_set_last_eb_blk = ocfs2_xattr_tree_set_last_eb_blk,
        .eo_get_last_eb_blk = ocfs2_xattr_tree_get_last_eb_blk,
        .eo_update_clusters = ocfs2_xattr_tree_update_clusters,
        .eo_fill_root_el = ocfs2_xattr_tree_fill_root_el,
        .eo_fill_max_leaf_clusters = ocfs2_xattr_tree_fill_max_leaf_clusters,
};

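/*
 * Extent tree operations for an indexed directory, rooted in an
 * ocfs2_dx_root_block (dr_list / dr_last_eb_blk / dr_clusters).
 */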
static void ocfs2_dx_root_set_last_eb_blk(struct ocfs2_extent_tree *et,
                                          u64 blkno)
{
        struct ocfs2_dx_root_block *dx_root = et->et_object;

        dx_root->dr_last_eb_blk = cpu_to_le64(blkno);
}

static u64 ocfs2_dx_root_get_last_eb_blk(struct ocfs2_extent_tree *et)
{
        struct ocfs2_dx_root_block *dx_root = et->et_object;

        return le64_to_cpu(dx_root->dr_last_eb_blk);
}

static void ocfs2_dx_root_update_clusters(struct ocfs2_extent_tree *et,
                                          u32 clusters)
{
        struct ocfs2_dx_root_block *dx_root = et->et_object;

        le32_add_cpu(&dx_root->dr_clusters, clusters);
}

static int ocfs2_dx_root_sanity_check(struct ocfs2_extent_tree *et)
{
        struct ocfs2_dx_root_block *dx_root = et->et_object;

        BUG_ON(!OCFS2_IS_VALID_DX_ROOT(dx_root));

        return 0;
}

static void ocfs2_dx_root_fill_root_el(struct ocfs2_extent_tree *et)
{
        struct ocfs2_dx_root_block *dx_root = et->et_object;

        et->et_root_el = &dx_root->dr_list;
}

static const struct ocfs2_extent_tree_operations ocfs2_dx_root_et_ops = {
        .eo_set_last_eb_blk = ocfs2_dx_root_set_last_eb_blk,
        .eo_get_last_eb_blk = ocfs2_dx_root_get_last_eb_blk,
        .eo_update_clusters = ocfs2_dx_root_update_clusters,
        .eo_sanity_check = ocfs2_dx_root_sanity_check,
        .eo_fill_root_el = ocfs2_dx_root_fill_root_el,
};

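/*
 * Extent tree operations for a refcount tree, rooted in an
 * ocfs2_refcount_block. Its records are never merged on insert, so
 * ->eo_extent_contig unconditionally returns CONTIG_NONE.
 */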
static void ocfs2_refcount_tree_fill_root_el(struct ocfs2_extent_tree *et)
{
        struct ocfs2_refcount_block *rb = et->et_object;

        et->et_root_el = &rb->rf_list;
}

static void ocfs2_refcount_tree_set_last_eb_blk(struct ocfs2_extent_tree *et,
                                                u64 blkno)
{
        struct ocfs2_refcount_block *rb = et->et_object;

        rb->rf_last_eb_blk = cpu_to_le64(blkno);
}

static u64 ocfs2_refcount_tree_get_last_eb_blk(struct ocfs2_extent_tree *et)
{
        struct ocfs2_refcount_block *rb = et->et_object;

        return le64_to_cpu(rb->rf_last_eb_blk);
}

static void ocfs2_refcount_tree_update_clusters(struct ocfs2_extent_tree *et,
                                                u32 clusters)
{
        struct ocfs2_refcount_block *rb = et->et_object;

        le32_add_cpu(&rb->rf_clusters, clusters);
}

static enum ocfs2_contig_type
ocfs2_refcount_tree_extent_contig(struct ocfs2_extent_tree *et,
                                  struct ocfs2_extent_rec *ext,
                                  struct ocfs2_extent_rec *insert_rec)
{
        return CONTIG_NONE;
}

static const struct ocfs2_extent_tree_operations ocfs2_refcount_tree_et_ops = {
        .eo_set_last_eb_blk = ocfs2_refcount_tree_set_last_eb_blk,
        .eo_get_last_eb_blk = ocfs2_refcount_tree_get_last_eb_blk,
        .eo_update_clusters = ocfs2_refcount_tree_update_clusters,
        .eo_fill_root_el = ocfs2_refcount_tree_fill_root_el,
        .eo_extent_contig = ocfs2_refcount_tree_extent_contig,
};

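/*
 * Common initializer behind the ocfs2_init_*_extent_tree() helpers
 * below. If no object is passed in, the root buffer's data is used, and
 * optional ops (such as ->eo_fill_max_leaf_clusters) fall back to sane
 * defaults.
 */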
static void __ocfs2_init_extent_tree(struct ocfs2_extent_tree *et,
                                     struct ocfs2_caching_info *ci,
                                     struct buffer_head *bh,
                                     ocfs2_journal_access_func access,
                                     void *obj,
                                     const struct ocfs2_extent_tree_operations *ops)
{
        et->et_ops = ops;
        et->et_root_bh = bh;
        et->et_ci = ci;
        et->et_root_journal_access = access;
        if (!obj)
                obj = (void *)bh->b_data;
        et->et_object = obj;
        et->et_dealloc = NULL;

        et->et_ops->eo_fill_root_el(et);
        if (!et->et_ops->eo_fill_max_leaf_clusters)
                et->et_max_leaf_clusters = 0;
        else
                et->et_ops->eo_fill_max_leaf_clusters(et);
}

void ocfs2_init_dinode_extent_tree(struct ocfs2_extent_tree *et,
                                   struct ocfs2_caching_info *ci,
                                   struct buffer_head *bh)
{
        __ocfs2_init_extent_tree(et, ci, bh, ocfs2_journal_access_di,
                                 NULL, &ocfs2_dinode_et_ops);
}

void ocfs2_init_xattr_tree_extent_tree(struct ocfs2_extent_tree *et,
                                       struct ocfs2_caching_info *ci,
                                       struct buffer_head *bh)
{
        __ocfs2_init_extent_tree(et, ci, bh, ocfs2_journal_access_xb,
                                 NULL, &ocfs2_xattr_tree_et_ops);
}

void ocfs2_init_xattr_value_extent_tree(struct ocfs2_extent_tree *et,
                                        struct ocfs2_caching_info *ci,
                                        struct ocfs2_xattr_value_buf *vb)
{
        __ocfs2_init_extent_tree(et, ci, vb->vb_bh, vb->vb_access, vb,
                                 &ocfs2_xattr_value_et_ops);
}

void ocfs2_init_dx_root_extent_tree(struct ocfs2_extent_tree *et,
                                    struct ocfs2_caching_info *ci,
                                    struct buffer_head *bh)
{
        __ocfs2_init_extent_tree(et, ci, bh, ocfs2_journal_access_dr,
                                 NULL, &ocfs2_dx_root_et_ops);
}

void ocfs2_init_refcount_extent_tree(struct ocfs2_extent_tree *et,
                                     struct ocfs2_caching_info *ci,
                                     struct buffer_head *bh)
{
        __ocfs2_init_extent_tree(et, ci, bh, ocfs2_journal_access_rb,
                                 NULL, &ocfs2_refcount_tree_et_ops);
}

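/*
 * Thin wrappers that dispatch through et->et_ops. The optional
 * operations (the extent map hooks, ->eo_insert_check, ->eo_sanity_check
 * and ->eo_extent_contig) are checked for NULL here, so callers never
 * need to care which tree type they are working on.
 */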
static inline void ocfs2_et_set_last_eb_blk(struct ocfs2_extent_tree *et,
                                            u64 new_last_eb_blk)
{
        et->et_ops->eo_set_last_eb_blk(et, new_last_eb_blk);
}

static inline u64 ocfs2_et_get_last_eb_blk(struct ocfs2_extent_tree *et)
{
        return et->et_ops->eo_get_last_eb_blk(et);
}

static inline void ocfs2_et_update_clusters(struct ocfs2_extent_tree *et,
                                            u32 clusters)
{
        et->et_ops->eo_update_clusters(et, clusters);
}

static inline void ocfs2_et_extent_map_insert(struct ocfs2_extent_tree *et,
                                              struct ocfs2_extent_rec *rec)
{
        if (et->et_ops->eo_extent_map_insert)
                et->et_ops->eo_extent_map_insert(et, rec);
}

static inline void ocfs2_et_extent_map_truncate(struct ocfs2_extent_tree *et,
                                                u32 clusters)
{
        if (et->et_ops->eo_extent_map_truncate)
                et->et_ops->eo_extent_map_truncate(et, clusters);
}

static inline int ocfs2_et_root_journal_access(handle_t *handle,
                                               struct ocfs2_extent_tree *et,
                                               int type)
{
        return et->et_root_journal_access(handle, et->et_ci, et->et_root_bh,
                                          type);
}

static inline enum ocfs2_contig_type
        ocfs2_et_extent_contig(struct ocfs2_extent_tree *et,
                               struct ocfs2_extent_rec *rec,
                               struct ocfs2_extent_rec *insert_rec)
{
        if (et->et_ops->eo_extent_contig)
                return et->et_ops->eo_extent_contig(et, rec, insert_rec);

        return ocfs2_extent_rec_contig(
                                ocfs2_metadata_cache_get_super(et->et_ci),
                                rec, insert_rec);
}

static inline int ocfs2_et_insert_check(struct ocfs2_extent_tree *et,
                                        struct ocfs2_extent_rec *rec)
{
        int ret = 0;

        if (et->et_ops->eo_insert_check)
                ret = et->et_ops->eo_insert_check(et, rec);
        return ret;
}

static inline int ocfs2_et_sanity_check(struct ocfs2_extent_tree *et)
{
        int ret = 0;

        if (et->et_ops->eo_sanity_check)
                ret = et->et_ops->eo_sanity_check(et);
        return ret;
}

static int ocfs2_cache_extent_block_free(struct ocfs2_cached_dealloc_ctxt *ctxt,
                                         struct ocfs2_extent_block *eb);
static void ocfs2_adjust_rightmost_records(handle_t *handle,
                                           struct ocfs2_extent_tree *et,
                                           struct ocfs2_path *path,
                                           struct ocfs2_extent_rec *insert_rec);
/*
 * Reset the actual path elements so that we can re-use the structure
 * to build another path. Generally, this involves freeing the buffer
 * heads.
 */
void ocfs2_reinit_path(struct ocfs2_path *path, int keep_root)
{
        int i, start = 0, depth = 0;
        struct ocfs2_path_item *node;

        if (keep_root)
                start = 1;

        for(i = start; i < path_num_items(path); i++) {
                node = &path->p_node[i];

                brelse(node->bh);
                node->bh = NULL;
                node->el = NULL;
        }

        /*
         * Tree depth may change during truncate, or insert. If we're
         * keeping the root extent list, then make sure that our path
         * structure reflects the proper depth.
         */
        if (keep_root)
                depth = le16_to_cpu(path_root_el(path)->l_tree_depth);
        else
                path_root_access(path) = NULL;

        path->p_tree_depth = depth;
}

void ocfs2_free_path(struct ocfs2_path *path)
{
        if (path) {
                ocfs2_reinit_path(path, 0);
                kfree(path);
        }
}

/*
 * Copy all the elements of src into dest. After this call, src could
 * be freed without affecting dest.
 *
 * Both paths should have the same root. Any non-root elements of dest
 * will be freed.
 */
static void ocfs2_cp_path(struct ocfs2_path *dest, struct ocfs2_path *src)
{
        int i;

        BUG_ON(path_root_bh(dest) != path_root_bh(src));
        BUG_ON(path_root_el(dest) != path_root_el(src));
        BUG_ON(path_root_access(dest) != path_root_access(src));

        ocfs2_reinit_path(dest, 1);

        for(i = 1; i < OCFS2_MAX_PATH_DEPTH; i++) {
                dest->p_node[i].bh = src->p_node[i].bh;
                dest->p_node[i].el = src->p_node[i].el;

                if (dest->p_node[i].bh)
                        get_bh(dest->p_node[i].bh);
        }
}

/*
 * Make the *dest path the same as src and re-initialize src path to
 * have a root only.
 */
static void ocfs2_mv_path(struct ocfs2_path *dest, struct ocfs2_path *src)
{
        int i;

        BUG_ON(path_root_bh(dest) != path_root_bh(src));
        BUG_ON(path_root_access(dest) != path_root_access(src));

        for(i = 1; i < OCFS2_MAX_PATH_DEPTH; i++) {
                brelse(dest->p_node[i].bh);

                dest->p_node[i].bh = src->p_node[i].bh;
                dest->p_node[i].el = src->p_node[i].el;

                src->p_node[i].bh = NULL;
                src->p_node[i].el = NULL;
        }
}

/*
 * Insert an extent block at given index.
 *
 * This will not take an additional reference on eb_bh.
 */
static inline void ocfs2_path_insert_eb(struct ocfs2_path *path, int index,
                                        struct buffer_head *eb_bh)
{
        struct ocfs2_extent_block *eb = (struct ocfs2_extent_block *)eb_bh->b_data;

        /*
         * Right now, no root bh is an extent block, so this helps
         * catch code errors with dinode trees. The assertion can be
         * safely removed if we ever need to insert extent block
         * structures at the root.
         */
        BUG_ON(index == 0);

        path->p_node[index].bh = eb_bh;
        path->p_node[index].el = &eb->h_list;
}

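/*
 * Allocate a new path rooted at root_bh/root_el. A reference is taken
 * on root_bh; the caller releases everything with ocfs2_free_path().
 */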
static struct ocfs2_path *ocfs2_new_path(struct buffer_head *root_bh,
                                         struct ocfs2_extent_list *root_el,
                                         ocfs2_journal_access_func access)
{
        struct ocfs2_path *path;

        BUG_ON(le16_to_cpu(root_el->l_tree_depth) >= OCFS2_MAX_PATH_DEPTH);

        path = kzalloc(sizeof(*path), GFP_NOFS);
        if (path) {
                path->p_tree_depth = le16_to_cpu(root_el->l_tree_depth);
                get_bh(root_bh);
                path_root_bh(path) = root_bh;
                path_root_el(path) = root_el;
                path_root_access(path) = access;
        }

        return path;
}

struct ocfs2_path *ocfs2_new_path_from_path(struct ocfs2_path *path)
{
        return ocfs2_new_path(path_root_bh(path), path_root_el(path),
                              path_root_access(path));
}

struct ocfs2_path *ocfs2_new_path_from_et(struct ocfs2_extent_tree *et)
{
        return ocfs2_new_path(et->et_root_bh, et->et_root_el,
                              et->et_root_journal_access);
}

/*
 * Journal the buffer at depth idx. All idx>0 are extent blocks;
 * idx == 0 is journaled with the path's root access function.
 *
 * I don't like the way this function's name looks next to
 * ocfs2_journal_access_path(), but I don't have a better one.
 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 721) int ocfs2_path_bh_journal_access(handle_t *handle,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 722) struct ocfs2_caching_info *ci,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 723) struct ocfs2_path *path,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 724) int idx)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 725) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 726) ocfs2_journal_access_func access = path_root_access(path);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 727)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 728) if (!access)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 729) access = ocfs2_journal_access;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 730)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 731) if (idx)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 732) access = ocfs2_journal_access_eb;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 733)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 734) return access(handle, ci, path->p_node[idx].bh,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 735) OCFS2_JOURNAL_ACCESS_WRITE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 736) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 737)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 738) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 739) * Convenience function to journal all components in a path.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 740) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 741) int ocfs2_journal_access_path(struct ocfs2_caching_info *ci,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 742) handle_t *handle,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 743) struct ocfs2_path *path)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 744) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 745) int i, ret = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 746)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 747) if (!path)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 748) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 749)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 750) for(i = 0; i < path_num_items(path); i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 751) ret = ocfs2_path_bh_journal_access(handle, ci, path, i);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 752) if (ret < 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 753) mlog_errno(ret);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 754) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 755) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 756) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 757)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 758) out:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 759) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 760) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 761)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 762) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 763) * Return the index of the extent record which contains cluster #v_cluster.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 764) * -1 is returned if it was not found.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 765) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 766) * Should work fine on interior and exterior nodes.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 767) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 768) int ocfs2_search_extent_list(struct ocfs2_extent_list *el, u32 v_cluster)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 769) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 770) int ret = -1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 771) int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 772) struct ocfs2_extent_rec *rec;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 773) u32 rec_end, rec_start, clusters;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 774)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 775) for(i = 0; i < le16_to_cpu(el->l_next_free_rec); i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 776) rec = &el->l_recs[i];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 777)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 778) rec_start = le32_to_cpu(rec->e_cpos);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 779) clusters = ocfs2_rec_clusters(el, rec);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 780)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 781) rec_end = rec_start + clusters;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 782)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 783) if (v_cluster >= rec_start && v_cluster < rec_end) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 784) ret = i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 785) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 786) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 787) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 788)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 789) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 790) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 791)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 792) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 793) * NOTE: ocfs2_block_extent_contig(), ocfs2_extents_adjacent() and
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 794) * ocfs2_extent_rec_contig() only work properly against leaf nodes!
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 795) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 796) static int ocfs2_block_extent_contig(struct super_block *sb,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 797) struct ocfs2_extent_rec *ext,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 798) u64 blkno)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 799) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 800) u64 blk_end = le64_to_cpu(ext->e_blkno);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 801)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 802) blk_end += ocfs2_clusters_to_blocks(sb,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 803) le16_to_cpu(ext->e_leaf_clusters));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 804)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 805) return blkno == blk_end;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 806) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 807)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 808) static int ocfs2_extents_adjacent(struct ocfs2_extent_rec *left,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 809) struct ocfs2_extent_rec *right)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 810) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 811) u32 left_range;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 812)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 813) left_range = le32_to_cpu(left->e_cpos) +
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 814) le16_to_cpu(left->e_leaf_clusters);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 815)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 816) return (left_range == le32_to_cpu(right->e_cpos));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 817) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 818)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 819) static enum ocfs2_contig_type
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 820) ocfs2_extent_rec_contig(struct super_block *sb,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 821) struct ocfs2_extent_rec *ext,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 822) struct ocfs2_extent_rec *insert_rec)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 823) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 824) u64 blkno = le64_to_cpu(insert_rec->e_blkno);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 825)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 826) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 827) * Refuse to coalesce extent records with different flag
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 828) * fields - we don't want to mix unwritten extents with user
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 829) * data.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 830) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 831) if (ext->e_flags != insert_rec->e_flags)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 832) return CONTIG_NONE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 833)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 834) if (ocfs2_extents_adjacent(ext, insert_rec) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 835) ocfs2_block_extent_contig(sb, ext, blkno))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 836) return CONTIG_RIGHT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 837)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 838) blkno = le64_to_cpu(ext->e_blkno);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 839) if (ocfs2_extents_adjacent(insert_rec, ext) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 840) ocfs2_block_extent_contig(sb, insert_rec, blkno))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 841) return CONTIG_LEFT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 842)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 843) return CONTIG_NONE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 844) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 845)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 846) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 847) * NOTE: We can have pretty much any combination of contiguousness and
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 848) * appending.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 849) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 850) * The usefulness of APPEND_TAIL lies mainly in letting us know that
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 851) * we'll have to update the path to that leaf.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 852) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 853) enum ocfs2_append_type {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 854) APPEND_NONE = 0,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 855) APPEND_TAIL,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 856) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 857)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 858) enum ocfs2_split_type {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 859) SPLIT_NONE = 0,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 860) SPLIT_LEFT,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 861) SPLIT_RIGHT,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 862) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 863)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 864) struct ocfs2_insert_type {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 865) enum ocfs2_split_type ins_split;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 866) enum ocfs2_append_type ins_appending;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 867) enum ocfs2_contig_type ins_contig;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 868) int ins_contig_index;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 869) int ins_tree_depth;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 870) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 871)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 872) struct ocfs2_merge_ctxt {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 873) enum ocfs2_contig_type c_contig_type;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 874) int c_has_empty_extent;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 875) int c_split_covers_rec;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 876) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 877)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 878) static int ocfs2_validate_extent_block(struct super_block *sb,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 879) struct buffer_head *bh)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 880) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 881) int rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 882) struct ocfs2_extent_block *eb =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 883) (struct ocfs2_extent_block *)bh->b_data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 884)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 885) trace_ocfs2_validate_extent_block((unsigned long long)bh->b_blocknr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 886)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 887) BUG_ON(!buffer_uptodate(bh));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 888)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 889) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 890) * If the ecc fails, we return the error but otherwise
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 891) * leave the filesystem running. We know any error is
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 892) * local to this block.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 893) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 894) rc = ocfs2_validate_meta_ecc(sb, bh->b_data, &eb->h_check);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 895) if (rc) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 896) mlog(ML_ERROR, "Checksum failed for extent block %llu\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 897) (unsigned long long)bh->b_blocknr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 898) return rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 899) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 900)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 901) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 902) * Errors after here are fatal.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 903) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 904)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 905) if (!OCFS2_IS_VALID_EXTENT_BLOCK(eb)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 906) rc = ocfs2_error(sb,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 907) "Extent block #%llu has bad signature %.*s\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 908) (unsigned long long)bh->b_blocknr, 7,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 909) eb->h_signature);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 910) goto bail;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 911) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 912)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 913) if (le64_to_cpu(eb->h_blkno) != bh->b_blocknr) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 914) rc = ocfs2_error(sb,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 915) "Extent block #%llu has an invalid h_blkno of %llu\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 916) (unsigned long long)bh->b_blocknr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 917) (unsigned long long)le64_to_cpu(eb->h_blkno));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 918) goto bail;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 919) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 920)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 921) if (le32_to_cpu(eb->h_fs_generation) != OCFS2_SB(sb)->fs_generation)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 922) rc = ocfs2_error(sb,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 923) "Extent block #%llu has an invalid h_fs_generation of #%u\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 924) (unsigned long long)bh->b_blocknr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 925) le32_to_cpu(eb->h_fs_generation));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 926) bail:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 927) return rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 928) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 929)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 930) int ocfs2_read_extent_block(struct ocfs2_caching_info *ci, u64 eb_blkno,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 931) struct buffer_head **bh)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 932) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 933) int rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 934) struct buffer_head *tmp = *bh;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 935)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 936) rc = ocfs2_read_block(ci, eb_blkno, &tmp,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 937) ocfs2_validate_extent_block);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 938)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 939) /* If ocfs2_read_block() got us a new bh, pass it up. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 940) if (!rc && !*bh)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 941) *bh = tmp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 942)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 943) return rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 944) }
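/*
 * Minimal calling sketch: the usual pattern passes a NULL bh and
 * releases the reference with brelse() when done (locals here are
 * hypothetical, error handling trimmed):
 *
 *	struct buffer_head *eb_bh = NULL;
 *
 *	ret = ocfs2_read_extent_block(ci, eb_blkno, &eb_bh);
 *	if (!ret) {
 *		eb = (struct ocfs2_extent_block *)eb_bh->b_data;
 *		(... walk eb->h_list ...)
 *		brelse(eb_bh);
 *	}
 */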
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 945)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 946)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 947) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 948) * How many free extents have we got before we need more metadata?
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 949) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 950) int ocfs2_num_free_extents(struct ocfs2_extent_tree *et)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 951) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 952) int retval;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 953) struct ocfs2_extent_list *el = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 954) struct ocfs2_extent_block *eb;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 955) struct buffer_head *eb_bh = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 956) u64 last_eb_blk = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 957)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 958) el = et->et_root_el;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 959) last_eb_blk = ocfs2_et_get_last_eb_blk(et);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 960)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 961) if (last_eb_blk) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 962) retval = ocfs2_read_extent_block(et->et_ci, last_eb_blk,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 963) &eb_bh);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 964) if (retval < 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 965) mlog_errno(retval);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 966) goto bail;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 967) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 968) eb = (struct ocfs2_extent_block *) eb_bh->b_data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 969) el = &eb->h_list;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 970) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 971)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 972) BUG_ON(el->l_tree_depth != 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 973)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 974) retval = le16_to_cpu(el->l_count) - le16_to_cpu(el->l_next_free_rec);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 975) bail:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 976) brelse(eb_bh);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 977)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 978) trace_ocfs2_num_free_extents(retval);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 979) return retval;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 980) }
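/*
 * Typical use, sketched under the assumption that the caller is about
 * to insert 'extents_to_add' records and reserves metadata through the
 * suballocator first (the reservation helper named below lives in
 * suballoc.h; treat its use here as illustrative, names are
 * hypothetical):
 *
 *	num_free = ocfs2_num_free_extents(et);
 *	if (num_free < 0)
 *		(fail)
 *	else if (num_free < extents_to_add)
 *		(reserve extent blocks, e.g. with
 *		 ocfs2_reserve_new_metadata_blocks(), before starting
 *		 the handle and doing the insert)
 */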
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 981)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 982) /* Expects the bhs[] array to already be allocated.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 983) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 984) * Sets h_signature, h_blkno, h_suballoc_bit, h_suballoc_slot, and
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 985) * l_count for you.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 986) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 987) static int ocfs2_create_new_meta_bhs(handle_t *handle,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 988) struct ocfs2_extent_tree *et,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 989) int wanted,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 990) struct ocfs2_alloc_context *meta_ac,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 991) struct buffer_head *bhs[])
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 992) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 993) int count, status, i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 994) u16 suballoc_bit_start;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 995) u32 num_got;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 996) u64 suballoc_loc, first_blkno;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 997) struct ocfs2_super *osb =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 998) OCFS2_SB(ocfs2_metadata_cache_get_super(et->et_ci));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 999) struct ocfs2_extent_block *eb;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1000)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1001) count = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1002) while (count < wanted) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1003) status = ocfs2_claim_metadata(handle,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1004) meta_ac,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1005) wanted - count,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1006) &suballoc_loc,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1007) &suballoc_bit_start,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1008) &num_got,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1009) &first_blkno);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1010) if (status < 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1011) mlog_errno(status);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1012) goto bail;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1013) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1014)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1015) for (i = count; i < (num_got + count); i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1016) bhs[i] = sb_getblk(osb->sb, first_blkno);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1017) if (bhs[i] == NULL) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1018) status = -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1019) mlog_errno(status);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1020) goto bail;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1021) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1022) ocfs2_set_new_buffer_uptodate(et->et_ci, bhs[i]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1023)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1024) status = ocfs2_journal_access_eb(handle, et->et_ci,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1025) bhs[i],
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1026) OCFS2_JOURNAL_ACCESS_CREATE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1027) if (status < 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1028) mlog_errno(status);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1029) goto bail;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1030) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1031)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1032) memset(bhs[i]->b_data, 0, osb->sb->s_blocksize);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1033) eb = (struct ocfs2_extent_block *) bhs[i]->b_data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1034) /* OK, set up the minimal stuff here. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1035) strcpy(eb->h_signature, OCFS2_EXTENT_BLOCK_SIGNATURE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1036) eb->h_blkno = cpu_to_le64(first_blkno);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1037) eb->h_fs_generation = cpu_to_le32(osb->fs_generation);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1038) eb->h_suballoc_slot =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1039) cpu_to_le16(meta_ac->ac_alloc_slot);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1040) eb->h_suballoc_loc = cpu_to_le64(suballoc_loc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1041) eb->h_suballoc_bit = cpu_to_le16(suballoc_bit_start);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1042) eb->h_list.l_count =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1043) cpu_to_le16(ocfs2_extent_recs_per_eb(osb->sb));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1044)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1045) suballoc_bit_start++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1046) first_blkno++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1047)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1048) /* These buffers will also be dirtied by the caller, so
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1049) * this isn't absolutely necessary. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1050) ocfs2_journal_dirty(handle, bhs[i]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1051) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1052)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1053) count += num_got;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1054) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1055)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1056) status = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1057) bail:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1058) if (status < 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1059) for (i = 0; i < wanted; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1060) brelse(bhs[i]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1061) bhs[i] = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1062) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1063) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1064) return status;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1065) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1066)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1067) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1068) * Helper function for ocfs2_add_branch() and ocfs2_shift_tree_depth().
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1069) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1070) * Returns the sum of the rightmost extent rec logical offset and
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1071) * cluster count.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1072) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1073) * ocfs2_add_branch() uses this to determine what logical cluster
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1074) * value should be populated into the leftmost new branch records.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1075) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1076) * ocfs2_shift_tree_depth() uses this to determine the # clusters
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1077) * value for the new topmost tree record.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1078) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1079) static inline u32 ocfs2_sum_rightmost_rec(struct ocfs2_extent_list *el)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1080) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1081) int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1082)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1083) i = le16_to_cpu(el->l_next_free_rec) - 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1084)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1085) return le32_to_cpu(el->l_recs[i].e_cpos) +
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1086) ocfs2_rec_clusters(el, &el->l_recs[i]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1087) }
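/*
 * Worked example: if the rightmost used record in 'el' has
 * e_cpos == 100 and covers 8 clusters, this returns 108.
 * ocfs2_add_branch() then uses 108 as the e_cpos of every record in
 * the new branch, and ocfs2_shift_tree_depth() uses it as the
 * e_int_clusters of the single record left in the root.
 */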
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1088)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1089) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1090) * Change the range of the branches in the rightmost path according to the leaf
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1091) * extent block's rightmost record.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1092) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1093) static int ocfs2_adjust_rightmost_branch(handle_t *handle,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1094) struct ocfs2_extent_tree *et)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1095) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1096) int status;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1097) struct ocfs2_path *path = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1098) struct ocfs2_extent_list *el;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1099) struct ocfs2_extent_rec *rec;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1100)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1101) path = ocfs2_new_path_from_et(et);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1102) if (!path) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1103) status = -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1104) return status;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1105) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1106)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1107) status = ocfs2_find_path(et->et_ci, path, UINT_MAX);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1108) if (status < 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1109) mlog_errno(status);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1110) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1111) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1112)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1113) status = ocfs2_extend_trans(handle, path_num_items(path));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1114) if (status < 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1115) mlog_errno(status);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1116) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1117) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1118)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1119) status = ocfs2_journal_access_path(et->et_ci, handle, path);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1120) if (status < 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1121) mlog_errno(status);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1122) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1123) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1124)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1125) el = path_leaf_el(path);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1126) rec = &el->l_recs[le16_to_cpu(el->l_next_free_rec) - 1];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1127)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1128) ocfs2_adjust_rightmost_records(handle, et, path, rec);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1129)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1130) out:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1131) ocfs2_free_path(path);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1132) return status;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1133) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1134)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1135) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1136) * Add an entire tree branch to our inode. eb_bh is the extent block
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1137) * to start at, if we don't want to start the branch at the root
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1138) * structure.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1139) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1140) * last_eb_bh is required as we have to update its next_leaf pointer
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1141) * for the new last extent block.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1142) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1143) * the new branch will be 'empty' in the sense that every block will
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1144) * contain a single record with cluster count == 0.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1145) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1146) static int ocfs2_add_branch(handle_t *handle,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1147) struct ocfs2_extent_tree *et,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1148) struct buffer_head *eb_bh,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1149) struct buffer_head **last_eb_bh,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1150) struct ocfs2_alloc_context *meta_ac)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1151) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1152) int status, new_blocks, i, block_given = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1153) u64 next_blkno, new_last_eb_blk;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1154) struct buffer_head *bh;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1155) struct buffer_head **new_eb_bhs = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1156) struct ocfs2_extent_block *eb;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1157) struct ocfs2_extent_list *eb_el;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1158) struct ocfs2_extent_list *el;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1159) u32 new_cpos, root_end;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1160)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1161) BUG_ON(!last_eb_bh || !*last_eb_bh);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1162)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1163) if (eb_bh) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1164) eb = (struct ocfs2_extent_block *) eb_bh->b_data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1165) el = &eb->h_list;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1166) } else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1167) el = et->et_root_el;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1168)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1169) /* we never add a branch to a leaf. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1170) BUG_ON(!el->l_tree_depth);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1171)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1172) new_blocks = le16_to_cpu(el->l_tree_depth);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1173)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1174) eb = (struct ocfs2_extent_block *)(*last_eb_bh)->b_data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1175) new_cpos = ocfs2_sum_rightmost_rec(&eb->h_list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1176) root_end = ocfs2_sum_rightmost_rec(et->et_root_el);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1177)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1178) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1179) * If there is a gap between the root end and the real end
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1180) * of the rightmost leaf block, we need to remove the gap
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1181) * between new_cpos and root_end first so that the tree
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1182) * is consistent after we add a new branch (it will start
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1183) * from new_cpos).
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1184) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1185) if (root_end > new_cpos) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1186) trace_ocfs2_adjust_rightmost_branch(
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1187) (unsigned long long)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1188) ocfs2_metadata_cache_owner(et->et_ci),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1189) root_end, new_cpos);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1190)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1191) status = ocfs2_adjust_rightmost_branch(handle, et);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1192) if (status) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1193) mlog_errno(status);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1194) goto bail;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1195) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1196) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1197)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1198) /* allocate the number of new eb blocks we need */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1199) new_eb_bhs = kcalloc(new_blocks, sizeof(struct buffer_head *),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1200) GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1201) if (!new_eb_bhs) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1202) status = -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1203) mlog_errno(status);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1204) goto bail;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1205) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1206)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1207) /* First, try to reuse blocks from the dealloc list, since we have
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1208) * already estimated how many extent blocks we may use.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1209) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1210) if (!ocfs2_is_dealloc_empty(et)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1211) status = ocfs2_reuse_blk_from_dealloc(handle, et,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1212) new_eb_bhs, new_blocks,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1213) &block_given);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1214) if (status < 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1215) mlog_errno(status);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1216) goto bail;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1217) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1218) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1219)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1220) BUG_ON(block_given > new_blocks);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1221)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1222) if (block_given < new_blocks) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1223) BUG_ON(!meta_ac);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1224) status = ocfs2_create_new_meta_bhs(handle, et,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1225) new_blocks - block_given,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1226) meta_ac,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1227) &new_eb_bhs[block_given]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1228) if (status < 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1229) mlog_errno(status);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1230) goto bail;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1231) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1232) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1233)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1234) /* Note: new_eb_bhs[new_blocks - 1] is the block that will be
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1235) * linked with the rest of the tree.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1236) * Conversely, new_eb_bhs[0] is the new bottommost leaf.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1237) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1238) * When we leave the loop, new_last_eb_blk will point to the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1239) * newest leaf, and next_blkno will point to the topmost extent
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1240) * block. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1241) next_blkno = new_last_eb_blk = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1242) for (i = 0; i < new_blocks; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1243) bh = new_eb_bhs[i];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1244) eb = (struct ocfs2_extent_block *) bh->b_data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1245) /* ocfs2_create_new_meta_bhs() should create it right! */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1246) BUG_ON(!OCFS2_IS_VALID_EXTENT_BLOCK(eb));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1247) eb_el = &eb->h_list;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1248)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1249) status = ocfs2_journal_access_eb(handle, et->et_ci, bh,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1250) OCFS2_JOURNAL_ACCESS_CREATE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1251) if (status < 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1252) mlog_errno(status);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1253) goto bail;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1254) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1255)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1256) eb->h_next_leaf_blk = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1257) eb_el->l_tree_depth = cpu_to_le16(i);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1258) eb_el->l_next_free_rec = cpu_to_le16(1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1259) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1260) * This actually counts as an empty extent as
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1261) * the cluster count == 0
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1262) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1263) eb_el->l_recs[0].e_cpos = cpu_to_le32(new_cpos);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1264) eb_el->l_recs[0].e_blkno = cpu_to_le64(next_blkno);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1265) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1266) * eb_el isn't always an interior node, but even leaf
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1267) * nodes want zeroed flags and reserved fields so
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1268) * this gets the whole 32 bits regardless of use.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1269) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1270) eb_el->l_recs[0].e_int_clusters = cpu_to_le32(0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1271) if (!eb_el->l_tree_depth)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1272) new_last_eb_blk = le64_to_cpu(eb->h_blkno);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1273)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1274) ocfs2_journal_dirty(handle, bh);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1275) next_blkno = le64_to_cpu(eb->h_blkno);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1276) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1277)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1278) /* This is a bit hairy. We want to update up to three blocks
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1279) * here without leaving any of them in an inconsistent state
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1280) * in case of error. We don't have to worry about
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1281) * journal_dirty erroring as it won't unless we've aborted the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1282) * handle (in which case we would never be here) so reserving
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1283) * the write with journal_access is all we need to do. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1284) status = ocfs2_journal_access_eb(handle, et->et_ci, *last_eb_bh,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1285) OCFS2_JOURNAL_ACCESS_WRITE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1286) if (status < 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1287) mlog_errno(status);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1288) goto bail;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1289) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1290) status = ocfs2_et_root_journal_access(handle, et,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1291) OCFS2_JOURNAL_ACCESS_WRITE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1292) if (status < 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1293) mlog_errno(status);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1294) goto bail;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1295) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1296) if (eb_bh) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1297) status = ocfs2_journal_access_eb(handle, et->et_ci, eb_bh,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1298) OCFS2_JOURNAL_ACCESS_WRITE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1299) if (status < 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1300) mlog_errno(status);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1301) goto bail;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1302) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1303) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1304)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1305) /* Link the new branch into the rest of the tree (el will
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1306) * either be on the root_bh, or the extent block passed in). */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1307) i = le16_to_cpu(el->l_next_free_rec);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1308) el->l_recs[i].e_blkno = cpu_to_le64(next_blkno);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1309) el->l_recs[i].e_cpos = cpu_to_le32(new_cpos);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1310) el->l_recs[i].e_int_clusters = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1311) le16_add_cpu(&el->l_next_free_rec, 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1312)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1313) /* fe needs a new last extent block pointer, as does the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1314) * next_leaf on the previously last-extent-block. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1315) ocfs2_et_set_last_eb_blk(et, new_last_eb_blk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1316)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1317) eb = (struct ocfs2_extent_block *) (*last_eb_bh)->b_data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1318) eb->h_next_leaf_blk = cpu_to_le64(new_last_eb_blk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1319)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1320) ocfs2_journal_dirty(handle, *last_eb_bh);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1321) ocfs2_journal_dirty(handle, et->et_root_bh);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1322) if (eb_bh)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1323) ocfs2_journal_dirty(handle, eb_bh);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1324)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1325) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1326) * Some callers want to track the rightmost leaf so pass it
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1327) * back here.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1328) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1329) brelse(*last_eb_bh);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1330) get_bh(new_eb_bhs[0]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1331) *last_eb_bh = new_eb_bhs[0];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1332)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1333) status = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1334) bail:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1335) if (new_eb_bhs) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1336) for (i = 0; i < new_blocks; i++)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1337) brelse(new_eb_bhs[i]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1338) kfree(new_eb_bhs);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1339) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1340)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1341) return status;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1342) }
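/*
 * Worked example of the result, assuming the branch hangs off the root
 * (eb_bh == NULL), el->l_tree_depth == 2 and new_cpos == 108:
 *
 *	root el:        gains one record {cpos 108, 0 clusters}
 *	                pointing at new_eb_bhs[1] (tree depth 1)
 *	new_eb_bhs[1]:  one record {cpos 108, 0 clusters} pointing at
 *	                new_eb_bhs[0] (tree depth 0, the new rightmost leaf)
 *	old last leaf:  its h_next_leaf_blk now points at new_eb_bhs[0],
 *	                as does the tree's last_eb_blk
 */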
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1343)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1344) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1345) * Adds another level to the allocation tree.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1346) * Returns the new extent block so you can add a branch to it
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1347) * after this call.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1348) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1349) static int ocfs2_shift_tree_depth(handle_t *handle,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1350) struct ocfs2_extent_tree *et,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1351) struct ocfs2_alloc_context *meta_ac,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1352) struct buffer_head **ret_new_eb_bh)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1353) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1354) int status, i, block_given = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1355) u32 new_clusters;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1356) struct buffer_head *new_eb_bh = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1357) struct ocfs2_extent_block *eb;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1358) struct ocfs2_extent_list *root_el;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1359) struct ocfs2_extent_list *eb_el;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1360)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1361) if (!ocfs2_is_dealloc_empty(et)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1362) status = ocfs2_reuse_blk_from_dealloc(handle, et,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1363) &new_eb_bh, 1,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1364) &block_given);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1365) } else if (meta_ac) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1366) status = ocfs2_create_new_meta_bhs(handle, et, 1, meta_ac,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1367) &new_eb_bh);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1368)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1369) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1370) BUG();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1371) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1372)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1373) if (status < 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1374) mlog_errno(status);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1375) goto bail;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1376) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1377)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1378) eb = (struct ocfs2_extent_block *) new_eb_bh->b_data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1379) /* ocfs2_create_new_meta_bhs() should create it right! */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1380) BUG_ON(!OCFS2_IS_VALID_EXTENT_BLOCK(eb));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1381)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1382) eb_el = &eb->h_list;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1383) root_el = et->et_root_el;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1384)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1385) status = ocfs2_journal_access_eb(handle, et->et_ci, new_eb_bh,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1386) OCFS2_JOURNAL_ACCESS_CREATE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1387) if (status < 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1388) mlog_errno(status);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1389) goto bail;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1390) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1391)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1392) /* copy the root extent list data into the new extent block */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1393) eb_el->l_tree_depth = root_el->l_tree_depth;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1394) eb_el->l_next_free_rec = root_el->l_next_free_rec;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1395) for (i = 0; i < le16_to_cpu(root_el->l_next_free_rec); i++)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1396) eb_el->l_recs[i] = root_el->l_recs[i];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1397)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1398) ocfs2_journal_dirty(handle, new_eb_bh);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1399)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1400) status = ocfs2_et_root_journal_access(handle, et,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1401) OCFS2_JOURNAL_ACCESS_WRITE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1402) if (status < 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1403) mlog_errno(status);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1404) goto bail;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1405) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1406)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1407) new_clusters = ocfs2_sum_rightmost_rec(eb_el);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1408)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1409) /* update root_bh now */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1410) le16_add_cpu(&root_el->l_tree_depth, 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1411) root_el->l_recs[0].e_cpos = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1412) root_el->l_recs[0].e_blkno = eb->h_blkno;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1413) root_el->l_recs[0].e_int_clusters = cpu_to_le32(new_clusters);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1414) for (i = 1; i < le16_to_cpu(root_el->l_next_free_rec); i++)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1415) memset(&root_el->l_recs[i], 0, sizeof(struct ocfs2_extent_rec));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1416) root_el->l_next_free_rec = cpu_to_le16(1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1417)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1418) /* If this is our 1st tree depth shift, then last_eb_blk
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1419) * becomes the allocated extent block */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1420) if (root_el->l_tree_depth == cpu_to_le16(1))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1421) ocfs2_et_set_last_eb_blk(et, le64_to_cpu(eb->h_blkno));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1422)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1423) ocfs2_journal_dirty(handle, et->et_root_bh);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1424)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1425) *ret_new_eb_bh = new_eb_bh;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1426) new_eb_bh = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1427) status = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1428) bail:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1429) brelse(new_eb_bh);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1430)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1431) return status;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1432) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1433)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1434) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1435) * Should only be called when there is no space left in any of the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1436) * leaf nodes. What we want to do is find the lowest tree depth
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1437) * non-leaf extent block with room for new records. There are three
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1438) * valid results of this search:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1439) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1440) * 1) a lowest extent block is found, then we pass it back in
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1441) * *lowest_eb_bh and return '0'
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1442) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1443) * 2) the search fails to find anything, but the root_el has room. We
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1444) * pass NULL back in *lowest_eb_bh, but still return '0'
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1445) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1446) * 3) the search fails to find anything AND the root_el is full, in
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1447) * which case we return > 0
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1448) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1449) * return status < 0 indicates an error.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1450) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1451) static int ocfs2_find_branch_target(struct ocfs2_extent_tree *et,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1452) struct buffer_head **target_bh)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1453) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1454) int status = 0, i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1455) u64 blkno;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1456) struct ocfs2_extent_block *eb;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1457) struct ocfs2_extent_list *el;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1458) struct buffer_head *bh = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1459) struct buffer_head *lowest_bh = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1460)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1461) *target_bh = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1462)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1463) el = et->et_root_el;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1464)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1465) while (le16_to_cpu(el->l_tree_depth) > 1) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1466) if (le16_to_cpu(el->l_next_free_rec) == 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1467) status = ocfs2_error(ocfs2_metadata_cache_get_super(et->et_ci),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1468) "Owner %llu has empty extent list (next_free_rec == 0)\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1469) (unsigned long long)ocfs2_metadata_cache_owner(et->et_ci));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1470) goto bail;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1471) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1472) i = le16_to_cpu(el->l_next_free_rec) - 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1473) blkno = le64_to_cpu(el->l_recs[i].e_blkno);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1474) if (!blkno) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1475) status = ocfs2_error(ocfs2_metadata_cache_get_super(et->et_ci),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1476) "Owner %llu has extent list where extent # %d has no physical block start\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1477) (unsigned long long)ocfs2_metadata_cache_owner(et->et_ci), i);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1478) goto bail;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1479) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1480)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1481) brelse(bh);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1482) bh = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1483)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1484) status = ocfs2_read_extent_block(et->et_ci, blkno, &bh);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1485) if (status < 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1486) mlog_errno(status);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1487) goto bail;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1488) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1489)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1490) eb = (struct ocfs2_extent_block *) bh->b_data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1491) el = &eb->h_list;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1492)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1493) if (le16_to_cpu(el->l_next_free_rec) <
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1494) le16_to_cpu(el->l_count)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1495) brelse(lowest_bh);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1496) lowest_bh = bh;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1497) get_bh(lowest_bh);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1498) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1499) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1500)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1501) /* If we didn't find one and the fe doesn't have any room,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1502) * then return '1' */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1503) el = et->et_root_el;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1504) if (!lowest_bh && (el->l_next_free_rec == el->l_count))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1505) status = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1506)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1507) *target_bh = lowest_bh;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1508) bail:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1509) brelse(bh);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1510)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1511) return status;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1512) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1513)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1514) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1515) * Grow a b-tree so that it has more records.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1516) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1517) * We might shift the tree depth in which case existing paths should
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1518) * be considered invalid.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1519) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1520) * Tree depth after the grow is returned via *final_depth.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1521) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1522) * *last_eb_bh will be updated by ocfs2_add_branch().
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1523) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1524) static int ocfs2_grow_tree(handle_t *handle, struct ocfs2_extent_tree *et,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1525) int *final_depth, struct buffer_head **last_eb_bh,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1526) struct ocfs2_alloc_context *meta_ac)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1527) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1528) int ret, shift;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1529) struct ocfs2_extent_list *el = et->et_root_el;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1530) int depth = le16_to_cpu(el->l_tree_depth);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1531) struct buffer_head *bh = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1532)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1533) BUG_ON(meta_ac == NULL && ocfs2_is_dealloc_empty(et));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1534)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1535) shift = ocfs2_find_branch_target(et, &bh);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1536) if (shift < 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1537) ret = shift;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1538) mlog_errno(ret);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1539) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1540) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1541)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1542) /* We traveled all the way to the bottom of the allocation tree
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1543) * and didn't find room for any more extents - we need to add
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1544) * another tree level */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1545) if (shift) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1546) BUG_ON(bh);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1547) trace_ocfs2_grow_tree(
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1548) (unsigned long long)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1549) ocfs2_metadata_cache_owner(et->et_ci),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1550) depth);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1551)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1552) /* ocfs2_shift_tree_depth will return us a buffer with
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1553) * the new extent block (so we can pass that to
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1554) * ocfs2_add_branch). */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1555) ret = ocfs2_shift_tree_depth(handle, et, meta_ac, &bh);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1556) if (ret < 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1557) mlog_errno(ret);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1558) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1559) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1560) depth++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1561) if (depth == 1) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1562) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1563) * Special case: we have room now if we shifted from
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1564) * tree_depth 0, so no more work needs to be done.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1565) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1566) * We won't be calling add_branch, so pass
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1567) * back *last_eb_bh as the new leaf. At depth
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1568) * zero, it should always be null so there's
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1569) * no reason to brelse.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1570) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1571) BUG_ON(*last_eb_bh);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1572) get_bh(bh);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1573) *last_eb_bh = bh;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1574) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1575) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1576) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1577)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1578) /* call ocfs2_add_branch to add the final part of the tree with
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1579) * the new data. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1580) ret = ocfs2_add_branch(handle, et, bh, last_eb_bh,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1581) meta_ac);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1582) if (ret < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1583) mlog_errno(ret);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1584)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1585) out:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1586) if (final_depth)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1587) *final_depth = depth;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1588) brelse(bh);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1589) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1590) }
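/*
 * Minimal caller sketch, assuming the handle already has enough
 * credits and that meta_ac (or a non-empty dealloc list) can supply
 * the new extent blocks; locals are hypothetical, error handling
 * trimmed:
 *
 *	struct buffer_head *last_eb_bh = NULL;
 *
 *	if (ocfs2_et_get_last_eb_blk(et))
 *		ocfs2_read_extent_block(et->et_ci,
 *					ocfs2_et_get_last_eb_blk(et),
 *					&last_eb_bh);
 *	ret = ocfs2_grow_tree(handle, et, &depth, &last_eb_bh, meta_ac);
 *	(on success, retry the insert against the deeper tree,
 *	 then brelse(last_eb_bh))
 */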
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1591)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1592) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1593) * Shift every record right by one slot, discarding the rightmost extent record.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1594) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1595) static void ocfs2_shift_records_right(struct ocfs2_extent_list *el)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1596) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1597) int next_free = le16_to_cpu(el->l_next_free_rec);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1598) int count = le16_to_cpu(el->l_count);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1599) unsigned int num_bytes;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1600)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1601) BUG_ON(!next_free);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1602) /* This will cause us to go off the end of our extent list. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1603) BUG_ON(next_free >= count);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1604)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1605) num_bytes = sizeof(struct ocfs2_extent_rec) * next_free;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1606)
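	/* Records 0..next_free-1 move up to 1..next_free; the caller then overwrites slot 0. */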
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1607) memmove(&el->l_recs[1], &el->l_recs[0], num_bytes);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1608) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1609)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1610) static void ocfs2_rotate_leaf(struct ocfs2_extent_list *el,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1611) struct ocfs2_extent_rec *insert_rec)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1612) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1613) int i, insert_index, next_free, has_empty, num_bytes;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1614) u32 insert_cpos = le32_to_cpu(insert_rec->e_cpos);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1615) struct ocfs2_extent_rec *rec;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1616)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1617) next_free = le16_to_cpu(el->l_next_free_rec);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1618) has_empty = ocfs2_is_empty_extent(&el->l_recs[0]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1619)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1620) BUG_ON(!next_free);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1621)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1622) /* The tree code before us didn't allow enough room in the leaf. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1623) BUG_ON(el->l_next_free_rec == el->l_count && !has_empty);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1624)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1625) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1626) * The easiest way to approach this is to just remove the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1627) * empty extent and temporarily decrement next_free.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1628) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1629) if (has_empty) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1630) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1631) * If next_free was 1 (only an empty extent), this
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1632) * loop won't execute, which is fine. We still want
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1633)  * the decrement below to happen.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1634) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1635) for(i = 0; i < (next_free - 1); i++)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1636) el->l_recs[i] = el->l_recs[i+1];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1637)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1638) next_free--;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1639) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1640)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1641) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1642) * Figure out what the new record index should be.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1643) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1644) for(i = 0; i < next_free; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1645) rec = &el->l_recs[i];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1646)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1647) if (insert_cpos < le32_to_cpu(rec->e_cpos))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1648) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1649) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1650) insert_index = i;
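	/* insert_index is the first record whose cpos exceeds insert_cpos, or next_free if the new record belongs at the tail. */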
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1651)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1652) trace_ocfs2_rotate_leaf(insert_cpos, insert_index,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1653) has_empty, next_free,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1654) le16_to_cpu(el->l_count));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1655)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1656) BUG_ON(insert_index < 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1657) BUG_ON(insert_index >= le16_to_cpu(el->l_count));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1658) BUG_ON(insert_index > next_free);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1659)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1660) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1661) * No need to memmove if we're just adding to the tail.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1662) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1663) if (insert_index != next_free) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1664) BUG_ON(next_free >= le16_to_cpu(el->l_count));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1665)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1666) num_bytes = next_free - insert_index;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1667) num_bytes *= sizeof(struct ocfs2_extent_rec);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1668) memmove(&el->l_recs[insert_index + 1],
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1669) &el->l_recs[insert_index],
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1670) num_bytes);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1671) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1672)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1673) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1674)  * Either we had an empty extent and need to re-increment, or
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1675)  * there was no empty extent on a non-full rightmost leaf node,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1676) * in which case we still need to increment.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1677) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1678) next_free++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1679) el->l_next_free_rec = cpu_to_le16(next_free);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1680) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1681) * Make sure none of the math above just messed up our tree.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1682) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1683) BUG_ON(le16_to_cpu(el->l_next_free_rec) > le16_to_cpu(el->l_count));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1684)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1685) el->l_recs[insert_index] = *insert_rec;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1687) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1688)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1689) static void ocfs2_remove_empty_extent(struct ocfs2_extent_list *el)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1690) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1691) int size, num_recs = le16_to_cpu(el->l_next_free_rec);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1692)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1693) BUG_ON(num_recs == 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1694)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1695) if (ocfs2_is_empty_extent(&el->l_recs[0])) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1696) num_recs--;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1697) size = num_recs * sizeof(struct ocfs2_extent_rec);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1698) memmove(&el->l_recs[0], &el->l_recs[1], size);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1699) memset(&el->l_recs[num_recs], 0,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1700) sizeof(struct ocfs2_extent_rec));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1701) el->l_next_free_rec = cpu_to_le16(num_recs);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1702) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1703) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1704)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1705) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1706)  * Create an empty extent record.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1707) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1708) * l_next_free_rec may be updated.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1709) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1710)  * If an empty extent already exists, do nothing.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1711) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1712) static void ocfs2_create_empty_extent(struct ocfs2_extent_list *el)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1713) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1714) int next_free = le16_to_cpu(el->l_next_free_rec);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1715)
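	/* Empty extents only ever live in leaf lists; interior records never have holes. */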
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1716) BUG_ON(le16_to_cpu(el->l_tree_depth) != 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1717)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1718) if (next_free == 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1719) goto set_and_inc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1720)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1721) if (ocfs2_is_empty_extent(&el->l_recs[0]))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1722) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1723)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1724) mlog_bug_on_msg(el->l_count == el->l_next_free_rec,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1725) "Asked to create an empty extent in a full list:\n"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1726) "count = %u, tree depth = %u",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1727) le16_to_cpu(el->l_count),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1728) le16_to_cpu(el->l_tree_depth));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1729)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1730) ocfs2_shift_records_right(el);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1731)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1732) set_and_inc:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1733) le16_add_cpu(&el->l_next_free_rec, 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1734) memset(&el->l_recs[0], 0, sizeof(struct ocfs2_extent_rec));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1735) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1736)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1737) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1738) * For a rotation which involves two leaf nodes, the "root node" is
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1739)  * the lowest level tree node which contains a path to both leaves. This
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1740)  * resulting set of information can be used to form a complete "subtree".
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1741) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1742) * This function is passed two full paths from the dinode down to a
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1743)  * pair of adjacent leaves. Its task is to figure out which path
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1744) * index contains the subtree root - this can be the root index itself
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1745) * in a worst-case rotation.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1746) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1747) * The array index of the subtree root is passed back.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1748) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1749) int ocfs2_find_subtree_root(struct ocfs2_extent_tree *et,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1750) struct ocfs2_path *left,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1751) struct ocfs2_path *right)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1752) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1753) int i = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1754)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1755) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1756) * Check that the caller passed in two paths from the same tree.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1757) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1758) BUG_ON(path_root_bh(left) != path_root_bh(right));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1759)
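	/*
	 * Walk both paths downward in lockstep; the deepest level at which
	 * they still share a block is the subtree root, hence the i - 1
	 * returned below.
	 */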
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1760) do {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1761) i++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1762)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1763) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1764) * The caller didn't pass two adjacent paths.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1765) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1766) mlog_bug_on_msg(i > left->p_tree_depth,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1767) "Owner %llu, left depth %u, right depth %u\n"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1768) "left leaf blk %llu, right leaf blk %llu\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1769) (unsigned long long)ocfs2_metadata_cache_owner(et->et_ci),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1770) left->p_tree_depth, right->p_tree_depth,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1771) (unsigned long long)path_leaf_bh(left)->b_blocknr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1772) (unsigned long long)path_leaf_bh(right)->b_blocknr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1773) } while (left->p_node[i].bh->b_blocknr ==
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1774) right->p_node[i].bh->b_blocknr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1775)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1776) return i - 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1777) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1778)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1779) typedef void (path_insert_t)(void *, struct buffer_head *);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1780)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1781) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1782) * Traverse a btree path in search of cpos, starting at root_el.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1783) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1784) * This code can be called with a cpos larger than the tree, in which
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1785) * case it will return the rightmost path.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1786) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1787) static int __ocfs2_find_path(struct ocfs2_caching_info *ci,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1788) struct ocfs2_extent_list *root_el, u32 cpos,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1789) path_insert_t *func, void *data)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1790) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1791) int i, ret = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1792) u32 range;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1793) u64 blkno;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1794) struct buffer_head *bh = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1795) struct ocfs2_extent_block *eb;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1796) struct ocfs2_extent_list *el;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1797) struct ocfs2_extent_rec *rec;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1798)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1799) el = root_el;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1800) while (el->l_tree_depth) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1801) if (le16_to_cpu(el->l_next_free_rec) == 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1802) ocfs2_error(ocfs2_metadata_cache_get_super(ci),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1803) "Owner %llu has empty extent list at depth %u\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1804) (unsigned long long)ocfs2_metadata_cache_owner(ci),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1805) le16_to_cpu(el->l_tree_depth));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1806) ret = -EROFS;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1807) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1809) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1810)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1811) for(i = 0; i < le16_to_cpu(el->l_next_free_rec) - 1; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1812) rec = &el->l_recs[i];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1813)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1814) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1815) * In the case that cpos is off the allocation
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1816) * tree, this should just wind up returning the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1817) * rightmost record.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1818) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1819) range = le32_to_cpu(rec->e_cpos) +
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1820) ocfs2_rec_clusters(el, rec);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1821) if (cpos >= le32_to_cpu(rec->e_cpos) && cpos < range)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1822) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1823) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1824)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1825) blkno = le64_to_cpu(el->l_recs[i].e_blkno);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1826) if (blkno == 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1827) ocfs2_error(ocfs2_metadata_cache_get_super(ci),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1828) "Owner %llu has bad blkno in extent list at depth %u (index %d)\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1829) (unsigned long long)ocfs2_metadata_cache_owner(ci),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1830) le16_to_cpu(el->l_tree_depth), i);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1831) ret = -EROFS;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1832) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1833) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1834)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1835) brelse(bh);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1836) bh = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1837) ret = ocfs2_read_extent_block(ci, blkno, &bh);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1838) if (ret) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1839) mlog_errno(ret);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1840) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1841) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1842)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1843) eb = (struct ocfs2_extent_block *) bh->b_data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1844) el = &eb->h_list;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1845)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1846) if (le16_to_cpu(el->l_next_free_rec) >
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1847) le16_to_cpu(el->l_count)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1848) ocfs2_error(ocfs2_metadata_cache_get_super(ci),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1849) "Owner %llu has bad count in extent list at block %llu (next free=%u, count=%u)\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1850) (unsigned long long)ocfs2_metadata_cache_owner(ci),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1851) (unsigned long long)bh->b_blocknr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1852) le16_to_cpu(el->l_next_free_rec),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1853) le16_to_cpu(el->l_count));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1854) ret = -EROFS;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1855) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1856) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1857)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1858) if (func)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1859) func(data, bh);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1860) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1861)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1862) out:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1863) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1864) * Catch any trailing bh that the loop didn't handle.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1865) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1866) brelse(bh);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1867)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1868) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1869) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1870)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1871) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1872) * Given an initialized path (that is, it has a valid root extent
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1873) * list), this function will traverse the btree in search of the path
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1874) * which would contain cpos.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1875) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1876) * The path traveled is recorded in the path structure.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1877) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1878) * Note that this will not do any comparisons on leaf node extent
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1879) * records, so it will work fine in the case that we just added a tree
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1880) * branch.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1881) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1882) struct find_path_data {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1883) int index;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1884) struct ocfs2_path *path;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1885) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1886) static void find_path_ins(void *data, struct buffer_head *bh)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1887) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1888) struct find_path_data *fp = data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1889)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1890) get_bh(bh);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1891) ocfs2_path_insert_eb(fp->path, fp->index, bh);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1892) fp->index++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1893) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1894) int ocfs2_find_path(struct ocfs2_caching_info *ci,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1895) struct ocfs2_path *path, u32 cpos)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1896) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1897) struct find_path_data data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1898)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1899) data.index = 1;
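	/* Index 0 is the root, which the path already holds, so discovered extent blocks go in starting at index 1. */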
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1900) data.path = path;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1901) return __ocfs2_find_path(ci, path_root_el(path), cpos,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1902) find_path_ins, &data);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1903) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1904)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1905) static void find_leaf_ins(void *data, struct buffer_head *bh)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1906) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1907) 	struct ocfs2_extent_block *eb = (struct ocfs2_extent_block *)bh->b_data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1908) struct ocfs2_extent_list *el = &eb->h_list;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1909) struct buffer_head **ret = data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1910)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1911) /* We want to retain only the leaf block. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1912) if (le16_to_cpu(el->l_tree_depth) == 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1913) get_bh(bh);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1914) *ret = bh;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1915) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1916) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1917) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1918) * Find the leaf block in the tree which would contain cpos. No
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1919) * checking of the actual leaf is done.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1920) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1921)  * Some callers want to use this instead of allocating a path structure
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1922) * and calling ocfs2_find_path().
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1923) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1924) * This function doesn't handle non btree extent lists.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1925) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1926) int ocfs2_find_leaf(struct ocfs2_caching_info *ci,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1927) struct ocfs2_extent_list *root_el, u32 cpos,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1928) struct buffer_head **leaf_bh)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1929) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1930) int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1931) struct buffer_head *bh = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1932)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1933) ret = __ocfs2_find_path(ci, root_el, cpos, find_leaf_ins, &bh);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1934) if (ret) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1935) mlog_errno(ret);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1936) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1937) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1938)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1939) *leaf_bh = bh;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1940) out:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1941) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1942) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1943)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1944) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1945) * Adjust the adjacent records (left_rec, right_rec) involved in a rotation.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1946) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1947)  * Basically, we've moved stuff around at the bottom of the tree and
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1948)  * we need to fix up the extent records above the changes to reflect
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1949)  * them.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1950)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1951)  * left_rec: the record on the left.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1952)  * right_rec: the record to the right of left_rec.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1953)  * right_child_el: the child list pointed to by right_rec.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1954) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1955) * By definition, this only works on interior nodes.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1956) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1957) static void ocfs2_adjust_adjacent_records(struct ocfs2_extent_rec *left_rec,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1958) struct ocfs2_extent_rec *right_rec,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1959) struct ocfs2_extent_list *right_child_el)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1960) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1961) u32 left_clusters, right_end;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1962)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1963) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1964) * Interior nodes never have holes. Their cpos is the cpos of
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1965) * the leftmost record in their child list. Their cluster
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1966) * count covers the full theoretical range of their child list
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1967) * - the range between their cpos and the cpos of the record
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1968) * immediately to their right.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1969) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1970) left_clusters = le32_to_cpu(right_child_el->l_recs[0].e_cpos);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1971) if (!ocfs2_rec_clusters(right_child_el, &right_child_el->l_recs[0])) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1972) BUG_ON(right_child_el->l_tree_depth);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1973) BUG_ON(le16_to_cpu(right_child_el->l_next_free_rec) <= 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1974) left_clusters = le32_to_cpu(right_child_el->l_recs[1].e_cpos);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1975) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1976) left_clusters -= le32_to_cpu(left_rec->e_cpos);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1977) left_rec->e_int_clusters = cpu_to_le32(left_clusters);
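	/* left_rec now spans from its cpos up to the start of the right child's first real record. */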
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1978)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1979) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1980) * Calculate the rightmost cluster count boundary before
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1981) * moving cpos - we will need to adjust clusters after
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1982) * updating e_cpos to keep the same highest cluster count.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1983) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1984) right_end = le32_to_cpu(right_rec->e_cpos);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1985) right_end += le32_to_cpu(right_rec->e_int_clusters);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1986)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1987) right_rec->e_cpos = left_rec->e_cpos;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1988) le32_add_cpu(&right_rec->e_cpos, left_clusters);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1989)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1990) right_end -= le32_to_cpu(right_rec->e_cpos);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1991) right_rec->e_int_clusters = cpu_to_le32(right_end);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1992) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1993)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1994) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1995) * Adjust the adjacent root node records involved in a
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1996) * rotation. left_el_blkno is passed in as a key so that we can easily
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1997)  * find its index in the root list.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1998) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1999) static void ocfs2_adjust_root_records(struct ocfs2_extent_list *root_el,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2000) struct ocfs2_extent_list *left_el,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2001) struct ocfs2_extent_list *right_el,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2002) u64 left_el_blkno)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2003) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2004) int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2005)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2006) BUG_ON(le16_to_cpu(root_el->l_tree_depth) <=
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2007) le16_to_cpu(left_el->l_tree_depth));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2008)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2009) for(i = 0; i < le16_to_cpu(root_el->l_next_free_rec) - 1; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2010) if (le64_to_cpu(root_el->l_recs[i].e_blkno) == left_el_blkno)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2011) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2012) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2013)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2014) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2015)  * The path walking code should never have returned a root and
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2016) * two paths which are not adjacent.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2017) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2018) BUG_ON(i >= (le16_to_cpu(root_el->l_next_free_rec) - 1));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2019)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2020) ocfs2_adjust_adjacent_records(&root_el->l_recs[i],
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2021) &root_el->l_recs[i + 1], right_el);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2022) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2023)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2024) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2025) * We've changed a leaf block (in right_path) and need to reflect that
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2026) * change back up the subtree.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2027) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2028) * This happens in multiple places:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2029) * - When we've moved an extent record from the left path leaf to the right
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2030) * path leaf to make room for an empty extent in the left path leaf.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2031) * - When our insert into the right path leaf is at the leftmost edge
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2032)  *   and requires an update of the path immediately to its left. This
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2033) * can occur at the end of some types of rotation and appending inserts.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2034) * - When we've adjusted the last extent record in the left path leaf and the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2035) * 1st extent record in the right path leaf during cross extent block merge.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2036) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2037) static void ocfs2_complete_edge_insert(handle_t *handle,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2038) struct ocfs2_path *left_path,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2039) struct ocfs2_path *right_path,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2040) int subtree_index)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2041) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2042) int i, idx;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2043) struct ocfs2_extent_list *el, *left_el, *right_el;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2044) struct ocfs2_extent_rec *left_rec, *right_rec;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2045) struct buffer_head *root_bh = left_path->p_node[subtree_index].bh;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2046)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2047) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2048) * Update the counts and position values within all the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2049) * interior nodes to reflect the leaf rotation we just did.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2050) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2051) * The root node is handled below the loop.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2052) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2053) * We begin the loop with right_el and left_el pointing to the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2054) * leaf lists and work our way up.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2055) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2056) * NOTE: within this loop, left_el and right_el always refer
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2057) * to the *child* lists.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2058) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2059) left_el = path_leaf_el(left_path);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2060) right_el = path_leaf_el(right_path);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2061) for(i = left_path->p_tree_depth - 1; i > subtree_index; i--) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2062) trace_ocfs2_complete_edge_insert(i);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2063)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2064) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2065) * One nice property of knowing that all of these
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2066) * nodes are below the root is that we only deal with
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2067) * the leftmost right node record and the rightmost
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2068) * left node record.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2069) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2070) el = left_path->p_node[i].el;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2071) idx = le16_to_cpu(left_el->l_next_free_rec) - 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2072) left_rec = &el->l_recs[idx];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2073)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2074) el = right_path->p_node[i].el;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2075) right_rec = &el->l_recs[0];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2076)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2077) ocfs2_adjust_adjacent_records(left_rec, right_rec, right_el);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2078)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2079) ocfs2_journal_dirty(handle, left_path->p_node[i].bh);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2080) ocfs2_journal_dirty(handle, right_path->p_node[i].bh);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2081)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2082) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2083) * Setup our list pointers now so that the current
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2084) * parents become children in the next iteration.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2085) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2086) left_el = left_path->p_node[i].el;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2087) right_el = right_path->p_node[i].el;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2088) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2089)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2090) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2091) * At the root node, adjust the two adjacent records which
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2092) * begin our path to the leaves.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2093) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2094)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2095) el = left_path->p_node[subtree_index].el;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2096) left_el = left_path->p_node[subtree_index + 1].el;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2097) right_el = right_path->p_node[subtree_index + 1].el;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2098)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2099) ocfs2_adjust_root_records(el, left_el, right_el,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2100) left_path->p_node[subtree_index + 1].bh->b_blocknr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2101)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2102) root_bh = left_path->p_node[subtree_index].bh;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2103)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2104) ocfs2_journal_dirty(handle, root_bh);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2105) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2106)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2107) static int ocfs2_rotate_subtree_right(handle_t *handle,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2108) struct ocfs2_extent_tree *et,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2109) struct ocfs2_path *left_path,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2110) struct ocfs2_path *right_path,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2111) int subtree_index)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2112) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2113) int ret, i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2114) struct buffer_head *right_leaf_bh;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2115) struct buffer_head *left_leaf_bh = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2116) struct buffer_head *root_bh;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2117) struct ocfs2_extent_list *right_el, *left_el;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2118) struct ocfs2_extent_rec move_rec;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2119)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2120) left_leaf_bh = path_leaf_bh(left_path);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2121) left_el = path_leaf_el(left_path);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2122)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2123) if (left_el->l_next_free_rec != left_el->l_count) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2124) ocfs2_error(ocfs2_metadata_cache_get_super(et->et_ci),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2125) "Inode %llu has non-full interior leaf node %llu (next free = %u)\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2126) (unsigned long long)ocfs2_metadata_cache_owner(et->et_ci),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2127) (unsigned long long)left_leaf_bh->b_blocknr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2128) le16_to_cpu(left_el->l_next_free_rec));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2129) return -EROFS;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2130) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2131)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2132) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2133) * This extent block may already have an empty record, so we
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2134) * return early if so.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2135) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2136) if (ocfs2_is_empty_extent(&left_el->l_recs[0]))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2137) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2138)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2139) root_bh = left_path->p_node[subtree_index].bh;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2140) BUG_ON(root_bh != right_path->p_node[subtree_index].bh);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2141)
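	/* Journal the subtree root through the right path, then every node below it on both paths. */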
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2142) ret = ocfs2_path_bh_journal_access(handle, et->et_ci, right_path,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2143) subtree_index);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2144) if (ret) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2145) mlog_errno(ret);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2146) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2147) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2148)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2149) for(i = subtree_index + 1; i < path_num_items(right_path); i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2150) ret = ocfs2_path_bh_journal_access(handle, et->et_ci,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2151) right_path, i);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2152) if (ret) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2153) mlog_errno(ret);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2154) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2155) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2156)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2157) ret = ocfs2_path_bh_journal_access(handle, et->et_ci,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2158) left_path, i);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2159) if (ret) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2160) mlog_errno(ret);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2161) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2162) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2163) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2164)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2165) right_leaf_bh = path_leaf_bh(right_path);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2166) right_el = path_leaf_el(right_path);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2167)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2168) /* This is a code error, not a disk corruption. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2169) mlog_bug_on_msg(!right_el->l_next_free_rec, "Inode %llu: Rotate fails "
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2170) "because rightmost leaf block %llu is empty\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2171) (unsigned long long)ocfs2_metadata_cache_owner(et->et_ci),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2172) (unsigned long long)right_leaf_bh->b_blocknr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2173)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2174) ocfs2_create_empty_extent(right_el);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2175)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2176) ocfs2_journal_dirty(handle, right_leaf_bh);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2177)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2178) /* Do the copy now. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2179) i = le16_to_cpu(left_el->l_next_free_rec) - 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2180) move_rec = left_el->l_recs[i];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2181) right_el->l_recs[0] = move_rec;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2182)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2183) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2184) * Clear out the record we just copied and shift everything
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2185) * over, leaving an empty extent in the left leaf.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2186) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2187) * We temporarily subtract from next_free_rec so that the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2188) * shift will lose the tail record (which is now defunct).
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2189) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2190) le16_add_cpu(&left_el->l_next_free_rec, -1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2191) ocfs2_shift_records_right(left_el);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2192) memset(&left_el->l_recs[0], 0, sizeof(struct ocfs2_extent_rec));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2193) le16_add_cpu(&left_el->l_next_free_rec, 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2194)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2195) ocfs2_journal_dirty(handle, left_leaf_bh);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2196)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2197) ocfs2_complete_edge_insert(handle, left_path, right_path,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2198) subtree_index);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2199)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2200) out:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2201) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2202) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2203)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2204) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2205) * Given a full path, determine what cpos value would return us a path
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2206) * containing the leaf immediately to the left of the current one.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2207) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2208) * Will return zero if the path passed in is already the leftmost path.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2209) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2210) int ocfs2_find_cpos_for_left_leaf(struct super_block *sb,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2211) struct ocfs2_path *path, u32 *cpos)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2212) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2213) int i, j, ret = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2214) u64 blkno;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2215) struct ocfs2_extent_list *el;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2216)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2217) BUG_ON(path->p_tree_depth == 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2218)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2219) *cpos = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2220)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2221) blkno = path_leaf_bh(path)->b_blocknr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2222)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2223) /* Start at the tree node just above the leaf and work our way up. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2224) i = path->p_tree_depth - 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2225) while (i >= 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2226) el = path->p_node[i].el;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2227)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2228) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2229) * Find the extent record just before the one in our
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2230) * path.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2231) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2232) for(j = 0; j < le16_to_cpu(el->l_next_free_rec); j++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2233) if (le64_to_cpu(el->l_recs[j].e_blkno) == blkno) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2234) if (j == 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2235) if (i == 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2236) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2237) * We've determined that the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2238) * path specified is already
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2239) * the leftmost one - return a
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2240) * cpos of zero.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2241) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2242) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2243) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2244) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2245) * The leftmost record points to our
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2246) * leaf - we need to travel up the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2247) * tree one level.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2248) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2249) goto next_node;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2250) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2251)
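				/*
				 * cpos of the last cluster covered by the
				 * record to our left - a lookup of this cpos
				 * lands in the neighbouring leaf.
				 */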
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2252) *cpos = le32_to_cpu(el->l_recs[j - 1].e_cpos);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2253) *cpos = *cpos + ocfs2_rec_clusters(el,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2254) &el->l_recs[j - 1]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2255) *cpos = *cpos - 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2256) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2257) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2258) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2259)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2260) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2261) * If we got here, we never found a valid node where
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2262) * the tree indicated one should be.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2263) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2264) ocfs2_error(sb, "Invalid extent tree at extent block %llu\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2265) (unsigned long long)blkno);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2266) ret = -EROFS;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2267) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2268)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2269) next_node:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2270) blkno = path->p_node[i].bh->b_blocknr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2271) i--;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2272) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2273)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2274) out:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2275) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2276) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2277)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2278) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2279) * Extend the transaction by enough credits to complete the rotation,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2280) * and still leave at least the original number of credits allocated
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2281) * to this transaction.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2282) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2283) static int ocfs2_extend_rotate_transaction(handle_t *handle, int subtree_depth,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2284) int op_credits,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2285) struct ocfs2_path *path)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2286) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2287) int ret = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2288) int credits = (path->p_tree_depth - subtree_depth) * 2 + 1 + op_credits;
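	/* Two buffers per level below the subtree root (one on each path), plus the subtree root itself, plus the caller's own credits. */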
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2289)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2290) if (jbd2_handle_buffer_credits(handle) < credits)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2291) ret = ocfs2_extend_trans(handle,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2292) credits - jbd2_handle_buffer_credits(handle));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2293)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2294) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2295) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2296)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2297) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2298) * Trap the case where we're inserting into the theoretical range past
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2299) * the _actual_ left leaf range. Otherwise, we'll rotate a record
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2300) * whose cpos is less than ours into the right leaf.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2301) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2302) * It's only necessary to look at the rightmost record of the left
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2303) * leaf because the logic that calls us should ensure that the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2304) * theoretical ranges in the path components above the leaves are
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2305) * correct.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2306) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2307) static int ocfs2_rotate_requires_path_adjustment(struct ocfs2_path *left_path,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2308) u32 insert_cpos)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2309) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2310) struct ocfs2_extent_list *left_el;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2311) struct ocfs2_extent_rec *rec;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2312) int next_free;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2313)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2314) left_el = path_leaf_el(left_path);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2315) next_free = le16_to_cpu(left_el->l_next_free_rec);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2316) rec = &left_el->l_recs[next_free - 1];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2317)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2318) if (insert_cpos > le32_to_cpu(rec->e_cpos))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2319) return 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2320) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2321) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2322)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2323) static int ocfs2_leftmost_rec_contains(struct ocfs2_extent_list *el, u32 cpos)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2324) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2325) int next_free = le16_to_cpu(el->l_next_free_rec);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2326) unsigned int range;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2327) struct ocfs2_extent_rec *rec;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2328)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2329) if (next_free == 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2330) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2331)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2332) rec = &el->l_recs[0];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2333) if (ocfs2_is_empty_extent(rec)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2334) /* Empty list. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2335) if (next_free == 1)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2336) return 0;
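		/* Skip the empty extent and test the first real record instead. */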
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2337) rec = &el->l_recs[1];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2338) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2339)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2340) range = le32_to_cpu(rec->e_cpos) + ocfs2_rec_clusters(el, rec);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2341) if (cpos >= le32_to_cpu(rec->e_cpos) && cpos < range)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2342) return 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2343) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2344) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2345)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2346) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2347) * Rotate all the records in a btree right one record, starting at insert_cpos.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2348) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2349) * The path to the rightmost leaf should be passed in.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2350) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2351)  * The right_path node array is assumed to be large enough to hold an entire path (tree depth).
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2352) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2353) * Upon successful return from this function:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2354) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2355) * - The 'right_path' array will contain a path to the leaf block
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2356) * whose range contains e_cpos.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2357) * - That leaf block will have a single empty extent in list index 0.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2358) * - In the case that the rotation requires a post-insert update,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2359) * *ret_left_path will contain a valid path which can be passed to
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2360) * ocfs2_insert_path().
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2361) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2362) static int ocfs2_rotate_tree_right(handle_t *handle,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2363) struct ocfs2_extent_tree *et,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2364) enum ocfs2_split_type split,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2365) u32 insert_cpos,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2366) struct ocfs2_path *right_path,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2367) struct ocfs2_path **ret_left_path)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2368) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2369) int ret, start, orig_credits = jbd2_handle_buffer_credits(handle);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2370) u32 cpos;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2371) struct ocfs2_path *left_path = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2372) struct super_block *sb = ocfs2_metadata_cache_get_super(et->et_ci);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2373)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2374) *ret_left_path = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2375)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2376) left_path = ocfs2_new_path_from_path(right_path);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2377) if (!left_path) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2378) ret = -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2379) mlog_errno(ret);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2380) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2381) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2382)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2383) ret = ocfs2_find_cpos_for_left_leaf(sb, right_path, &cpos);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2384) if (ret) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2385) mlog_errno(ret);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2386) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2387) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2388)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2389) trace_ocfs2_rotate_tree_right(
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2390) (unsigned long long)ocfs2_metadata_cache_owner(et->et_ci),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2391) insert_cpos, cpos);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2392)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2393) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2394) * What we want to do here is:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2395) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2396) * 1) Start with the rightmost path.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2397) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2398) * 2) Determine a path to the leaf block directly to the left
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2399) * of that leaf.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2400) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2401) * 3) Determine the 'subtree root' - the lowest level tree node
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2402) * which contains a path to both leaves.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2403) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2404) * 4) Rotate the subtree.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2405) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2406) * 5) Find the next subtree by considering the left path to be
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2407) * the new right path.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2408) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2409) * The check at the top of this while loop also accepts
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2410) * insert_cpos == cpos because cpos is only a _theoretical_
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2411) * value to get us the left path - insert_cpos might very well
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2412) * be filling that hole.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2413) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2414) * Stop at a cpos of '0' because we either started at the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2415) * leftmost branch (i.e., a tree with one branch and a
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2416) * rotation inside of it), or we've gone as far as we can in
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2417) * rotating subtrees.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2418) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2419) while (cpos && insert_cpos <= cpos) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2420) trace_ocfs2_rotate_tree_right(
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2421) (unsigned long long)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2422) ocfs2_metadata_cache_owner(et->et_ci),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2423) insert_cpos, cpos);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2424)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2425) ret = ocfs2_find_path(et->et_ci, left_path, cpos);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2426) if (ret) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2427) mlog_errno(ret);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2428) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2429) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2430)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2431) mlog_bug_on_msg(path_leaf_bh(left_path) ==
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2432) path_leaf_bh(right_path),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2433) "Owner %llu: error during insert of %u "
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2434) "(left path cpos %u) results in two identical "
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2435) "paths ending at %llu\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2436) (unsigned long long)ocfs2_metadata_cache_owner(et->et_ci),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2437) insert_cpos, cpos,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2438) (unsigned long long)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2439) path_leaf_bh(left_path)->b_blocknr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2440)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2441) if (split == SPLIT_NONE &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2442) ocfs2_rotate_requires_path_adjustment(left_path,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2443) insert_cpos)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2444)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2445) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2446) * We've rotated the tree as much as we
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2447) * should. The rest is up to
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2448) * ocfs2_insert_path() to complete, after the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2449) * record insertion. We indicate this
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2450) * situation by returning the left path.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2451) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2452) * The reason we don't adjust the records here
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2453) * before the record insert is that an error
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2454) * later might break the rule where a parent
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2455) * record e_cpos will reflect the actual
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2456) * e_cpos of the 1st nonempty record of the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2457) * child list.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2458) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2459) *ret_left_path = left_path;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2460) goto out_ret_path;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2461) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2462)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2463) start = ocfs2_find_subtree_root(et, left_path, right_path);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2464)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2465) trace_ocfs2_rotate_subtree(start,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2466) (unsigned long long)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2467) right_path->p_node[start].bh->b_blocknr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2468) right_path->p_tree_depth);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2469)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2470) ret = ocfs2_extend_rotate_transaction(handle, start,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2471) orig_credits, right_path);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2472) if (ret) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2473) mlog_errno(ret);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2474) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2475) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2476)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2477) ret = ocfs2_rotate_subtree_right(handle, et, left_path,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2478) right_path, start);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2479) if (ret) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2480) mlog_errno(ret);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2481) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2482) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2483)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2484) if (split != SPLIT_NONE &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2485) ocfs2_leftmost_rec_contains(path_leaf_el(right_path),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2486) insert_cpos)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2487) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2488) * A rotate moves the rightmost left leaf
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2489) * record over to the leftmost right leaf
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2490) * slot. If we're doing an extent split
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2491) * instead of a real insert, then we have to
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2492) * check that the extent to be split wasn't
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2493) * just moved over. If it was, then we can
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2494) * exit here, passing left_path back -
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2495) * ocfs2_split_extent() is smart enough to
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2496) * search both leaves.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2497) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2498) *ret_left_path = left_path;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2499) goto out_ret_path;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2500) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2501)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2502) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2503) * There is no need to re-read the next right path
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2504) * as we know that it'll be our current left
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2505) * path. Optimize by copying values instead.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2506) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2507) ocfs2_mv_path(right_path, left_path);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2508)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2509) ret = ocfs2_find_cpos_for_left_leaf(sb, right_path, &cpos);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2510) if (ret) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2511) mlog_errno(ret);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2512) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2513) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2514) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2515)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2516) out:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2517) ocfs2_free_path(left_path);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2518)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2519) out_ret_path:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2520) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2521) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2522)
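/*
 * Refresh e_int_clusters in the last record of every non-leaf node on
 * 'path' so that each again extends to the end of the rightmost
 * leaf's coverage.  Called after the rightmost leaf has changed, e.g.
 * when a branch has been removed from the tree.
 */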
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2523) static int ocfs2_update_edge_lengths(handle_t *handle,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2524) struct ocfs2_extent_tree *et,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2525) struct ocfs2_path *path)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2526) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2527) int i, idx, ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2528) struct ocfs2_extent_rec *rec;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2529) struct ocfs2_extent_list *el;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2530) struct ocfs2_extent_block *eb;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2531) u32 range;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2532)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2533) ret = ocfs2_journal_access_path(et->et_ci, handle, path);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2534) if (ret) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2535) mlog_errno(ret);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2536) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2537) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2538)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2539) /* Path should always be rightmost. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2540) eb = (struct ocfs2_extent_block *)path_leaf_bh(path)->b_data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2541) BUG_ON(eb->h_next_leaf_blk != 0ULL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2542)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2543) el = &eb->h_list;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2544) BUG_ON(le16_to_cpu(el->l_next_free_rec) == 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2545) idx = le16_to_cpu(el->l_next_free_rec) - 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2546) rec = &el->l_recs[idx];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2547) range = le32_to_cpu(rec->e_cpos) + ocfs2_rec_clusters(el, rec);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2548)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2549) for (i = 0; i < path->p_tree_depth; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2550) el = path->p_node[i].el;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2551) idx = le16_to_cpu(el->l_next_free_rec) - 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2552) rec = &el->l_recs[idx];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2553)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2554) rec->e_int_clusters = cpu_to_le32(range);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2555) le32_add_cpu(&rec->e_int_clusters, -le32_to_cpu(rec->e_cpos));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2556)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2557) ocfs2_journal_dirty(handle, path->p_node[i].bh);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2558) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2559) out:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2560) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2561) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2562)
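/*
 * Remove the blocks on 'path' from level 'unlink_start' down to the
 * leaf: empty each block's extent list, mark it dirty and queue it on
 * 'dealloc' for later freeing.  A block which unexpectedly still
 * holds records is only logged and dropped from the metadata cache.
 */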
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2563) static void ocfs2_unlink_path(handle_t *handle,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2564) struct ocfs2_extent_tree *et,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2565) struct ocfs2_cached_dealloc_ctxt *dealloc,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2566) struct ocfs2_path *path, int unlink_start)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2567) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2568) int ret, i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2569) struct ocfs2_extent_block *eb;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2570) struct ocfs2_extent_list *el;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2571) struct buffer_head *bh;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2572)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2573) for (i = unlink_start; i < path_num_items(path); i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2574) bh = path->p_node[i].bh;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2575)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2576) eb = (struct ocfs2_extent_block *)bh->b_data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2577) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2578) * Not all nodes might have had their final count
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2579) * decremented by the caller - handle this here.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2580) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2581) el = &eb->h_list;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2582) if (le16_to_cpu(el->l_next_free_rec) > 1) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2583) mlog(ML_ERROR,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2584) "Inode %llu, attempted to remove extent block "
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2585) "%llu with %u records\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2586) (unsigned long long)ocfs2_metadata_cache_owner(et->et_ci),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2587) (unsigned long long)le64_to_cpu(eb->h_blkno),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2588) le16_to_cpu(el->l_next_free_rec));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2589)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2590) ocfs2_journal_dirty(handle, bh);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2591) ocfs2_remove_from_cache(et->et_ci, bh);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2592) continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2593) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2594)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2595) el->l_next_free_rec = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2596) memset(&el->l_recs[0], 0, sizeof(struct ocfs2_extent_rec));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2597)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2598) ocfs2_journal_dirty(handle, bh);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2599)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2600) ret = ocfs2_cache_extent_block_free(dealloc, eb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2601) if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2602) mlog_errno(ret);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2603)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2604) ocfs2_remove_from_cache(et->et_ci, bh);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2605) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2606) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2607)
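/*
 * Unlink the right subtree: clear the record in the subtree root that
 * points at the first block of 'right_path' below it, make the left
 * leaf the new rightmost one (h_next_leaf_blk = 0), and hand the
 * now-orphaned blocks below the subtree root to ocfs2_unlink_path().
 */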
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2608) static void ocfs2_unlink_subtree(handle_t *handle,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2609) struct ocfs2_extent_tree *et,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2610) struct ocfs2_path *left_path,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2611) struct ocfs2_path *right_path,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2612) int subtree_index,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2613) struct ocfs2_cached_dealloc_ctxt *dealloc)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2614) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2615) int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2616) struct buffer_head *root_bh = left_path->p_node[subtree_index].bh;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2617) struct ocfs2_extent_list *root_el = left_path->p_node[subtree_index].el;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2618) struct ocfs2_extent_block *eb;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2619)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2620) eb = (struct ocfs2_extent_block *)right_path->p_node[subtree_index + 1].bh->b_data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2621)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2622) for (i = 1; i < le16_to_cpu(root_el->l_next_free_rec); i++)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2623) if (root_el->l_recs[i].e_blkno == eb->h_blkno)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2624) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2625)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2626) BUG_ON(i >= le16_to_cpu(root_el->l_next_free_rec));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2627)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2628) memset(&root_el->l_recs[i], 0, sizeof(struct ocfs2_extent_rec));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2629) le16_add_cpu(&root_el->l_next_free_rec, -1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2630)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2631) eb = (struct ocfs2_extent_block *)path_leaf_bh(left_path)->b_data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2632) eb->h_next_leaf_blk = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2633)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2634) ocfs2_journal_dirty(handle, root_bh);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2635) ocfs2_journal_dirty(handle, path_leaf_bh(left_path));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2636)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2637) ocfs2_unlink_path(handle, et, dealloc, right_path,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2638) subtree_index + 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2639) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2640)
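/*
 * Rotate one record from the right leaf into the empty slot at the
 * head of the left leaf.  If the right leaf is left without records
 * (it must then be the rightmost one), the whole right subtree is
 * unlinked and *deleted is set so the caller knows the rotation is
 * complete.  An empty non-rightmost right leaf makes us bail out
 * with -EAGAIN so the caller can deal with it first.
 */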
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2641) static int ocfs2_rotate_subtree_left(handle_t *handle,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2642) struct ocfs2_extent_tree *et,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2643) struct ocfs2_path *left_path,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2644) struct ocfs2_path *right_path,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2645) int subtree_index,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2646) struct ocfs2_cached_dealloc_ctxt *dealloc,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2647) int *deleted)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2648) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2649) int ret, i, del_right_subtree = 0, right_has_empty = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2650) struct buffer_head *root_bh, *et_root_bh = path_root_bh(right_path);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2651) struct ocfs2_extent_list *right_leaf_el, *left_leaf_el;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2652) struct ocfs2_extent_block *eb;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2653)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2654) *deleted = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2655)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2656) right_leaf_el = path_leaf_el(right_path);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2657) left_leaf_el = path_leaf_el(left_path);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2658) root_bh = left_path->p_node[subtree_index].bh;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2659) BUG_ON(root_bh != right_path->p_node[subtree_index].bh);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2660)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2661) if (!ocfs2_is_empty_extent(&left_leaf_el->l_recs[0]))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2662) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2663)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2664) eb = (struct ocfs2_extent_block *)path_leaf_bh(right_path)->b_data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2665) if (ocfs2_is_empty_extent(&right_leaf_el->l_recs[0])) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2666) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2667) * It's legal for us to proceed if the right leaf is
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2668) * the rightmost one and it has an empty extent. There
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2669) * are two cases to handle - whether the leaf will be
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2670) * empty after removal or not. If the leaf isn't empty
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2671) * then just remove the empty extent up front. The
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2672) * next block will handle empty leaves by flagging
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2673) * them for unlink.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2674) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2675) * Non-rightmost leaves will return -EAGAIN and the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2676) * caller can manually move the subtree and retry.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2677) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2678)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2679) if (eb->h_next_leaf_blk != 0ULL)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2680) return -EAGAIN;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2681)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2682) if (le16_to_cpu(right_leaf_el->l_next_free_rec) > 1) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2683) ret = ocfs2_journal_access_eb(handle, et->et_ci,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2684) path_leaf_bh(right_path),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2685) OCFS2_JOURNAL_ACCESS_WRITE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2686) if (ret) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2687) mlog_errno(ret);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2688) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2689) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2690)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2691) ocfs2_remove_empty_extent(right_leaf_el);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2692) } else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2693) right_has_empty = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2694) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2695)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2696) if (eb->h_next_leaf_blk == 0ULL &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2697) le16_to_cpu(right_leaf_el->l_next_free_rec) == 1) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2698) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2699) * We have to update i_last_eb_blk during the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2700) * metadata delete.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2701) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2702) ret = ocfs2_et_root_journal_access(handle, et,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2703) OCFS2_JOURNAL_ACCESS_WRITE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2704) if (ret) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2705) mlog_errno(ret);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2706) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2707) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2708)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2709) del_right_subtree = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2710) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2711)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2712) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2713) * Getting here with an empty extent in the right path implies
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2714) * that it's the rightmost path and will be deleted.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2715) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2716) BUG_ON(right_has_empty && !del_right_subtree);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2717)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2718) ret = ocfs2_path_bh_journal_access(handle, et->et_ci, right_path,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2719) subtree_index);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2720) if (ret) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2721) mlog_errno(ret);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2722) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2723) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2724)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2725) for (i = subtree_index + 1; i < path_num_items(right_path); i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2726) ret = ocfs2_path_bh_journal_access(handle, et->et_ci,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2727) right_path, i);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2728) if (ret) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2729) mlog_errno(ret);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2730) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2731) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2732)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2733) ret = ocfs2_path_bh_journal_access(handle, et->et_ci,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2734) left_path, i);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2735) if (ret) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2736) mlog_errno(ret);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2737) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2738) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2739) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2740)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2741) if (!right_has_empty) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2742) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2743) * Only do this if we're moving a real
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2744) * record. Otherwise, the action is delayed until
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2745) * after removal of the right path, in which case we
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2746) * can do a simple shift to remove the empty extent.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2747) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2748) ocfs2_rotate_leaf(left_leaf_el, &right_leaf_el->l_recs[0]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2749) memset(&right_leaf_el->l_recs[0], 0,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2750) sizeof(struct ocfs2_extent_rec));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2751) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2752) if (eb->h_next_leaf_blk == 0ULL) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2753) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2754) * Move recs over to get rid of empty extent, decrease
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2755) * next_free. This is allowed to remove the last
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2756) * extent in our leaf (setting l_next_free_rec to
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2757) * zero) - the delete code below won't care.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2758) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2759) ocfs2_remove_empty_extent(right_leaf_el);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2760) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2761)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2762) ocfs2_journal_dirty(handle, path_leaf_bh(left_path));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2763) ocfs2_journal_dirty(handle, path_leaf_bh(right_path));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2764)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2765) if (del_right_subtree) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2766) ocfs2_unlink_subtree(handle, et, left_path, right_path,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2767) subtree_index, dealloc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2768) ret = ocfs2_update_edge_lengths(handle, et, left_path);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2769) if (ret) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2770) mlog_errno(ret);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2771) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2772) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2773)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2774) eb = (struct ocfs2_extent_block *)path_leaf_bh(left_path)->b_data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2775) ocfs2_et_set_last_eb_blk(et, le64_to_cpu(eb->h_blkno));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2776)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2777) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2778) * Removal of the extent in the left leaf was skipped
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2779) * above so we could delete the right path
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2780) * 1st.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2781) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2782) if (right_has_empty)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2783) ocfs2_remove_empty_extent(left_leaf_el);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2784)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2785) ocfs2_journal_dirty(handle, et_root_bh);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2786)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2787) *deleted = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2788) } else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2789) ocfs2_complete_edge_insert(handle, left_path, right_path,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2790) subtree_index);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2791)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2792) out:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2793) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2794) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2795)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2796) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2797) * Given a full path, determine what cpos value would return us a path
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2798) * containing the leaf immediately to the right of the current one.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2799) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2800) * Will return zero if the path passed in is already the rightmost path.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2801) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2802) * This looks similar to, but is subtly different from,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2803) * ocfs2_find_cpos_for_left_leaf().
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2804) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2805) int ocfs2_find_cpos_for_right_leaf(struct super_block *sb,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2806) struct ocfs2_path *path, u32 *cpos)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2807) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2808) int i, j, ret = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2809) u64 blkno;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2810) struct ocfs2_extent_list *el;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2811)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2812) *cpos = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2813)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2814) if (path->p_tree_depth == 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2815) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2816)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2817) blkno = path_leaf_bh(path)->b_blocknr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2818)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2819) /* Start at the tree node just above the leaf and work our way up. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2820) i = path->p_tree_depth - 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2821) while (i >= 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2822) int next_free;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2823)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2824) el = path->p_node[i].el;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2825)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2826) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2827) * Find the extent record just after the one in our
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2828) * path.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2829) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2830) next_free = le16_to_cpu(el->l_next_free_rec);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2831) for (j = 0; j < le16_to_cpu(el->l_next_free_rec); j++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2832) if (le64_to_cpu(el->l_recs[j].e_blkno) == blkno) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2833) if (j == (next_free - 1)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2834) if (i == 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2835) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2836) * We've determined that the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2837) * path specified is already
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2838) * the rightmost one - return a
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2839) * cpos of zero.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2840) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2841) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2842) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2843) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2844) * The rightmost record points to our
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2845) * leaf - we need to travel up the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2846) * tree one level.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2847) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2848) goto next_node;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2849) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2850)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2851) *cpos = le32_to_cpu(el->l_recs[j + 1].e_cpos);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2852) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2853) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2854) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2855)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2856) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2857) * If we got here, we never found a valid node where
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2858) * the tree indicated one should be.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2859) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2860) ocfs2_error(sb, "Invalid extent tree at extent block %llu\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2861) (unsigned long long)blkno);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2862) ret = -EROFS;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2863) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2864)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2865) next_node:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2866) blkno = path->p_node[i].bh->b_blocknr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2867) i--;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2868) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2869)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2870) out:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2871) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2872) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2873)
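/*
 * The rightmost leaf needs no rotation into a neighbour - if it
 * starts with an empty extent we can simply shift its records down
 * over the empty slot.
 */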
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2874) static int ocfs2_rotate_rightmost_leaf_left(handle_t *handle,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2875) struct ocfs2_extent_tree *et,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2876) struct ocfs2_path *path)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2877) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2878) int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2879) struct buffer_head *bh = path_leaf_bh(path);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2880) struct ocfs2_extent_list *el = path_leaf_el(path);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2881)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2882) if (!ocfs2_is_empty_extent(&el->l_recs[0]))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2883) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2884)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2885) ret = ocfs2_path_bh_journal_access(handle, et->et_ci, path,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2886) path_num_items(path) - 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2887) if (ret) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2888) mlog_errno(ret);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2889) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2890) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2891)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2892) ocfs2_remove_empty_extent(el);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2893) ocfs2_journal_dirty(handle, bh);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2894)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2895) out:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2896) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2897) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2898)
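/*
 * Core of the left rotation: starting at 'path', whose leaf begins
 * with an empty extent, walk toward the rightmost edge, rotating one
 * record left across each subtree boundary.  Stops early when a
 * rightmost branch gets deleted, or hands the offending path back in
 * *empty_extent_path (with -EAGAIN) when a non-rightmost leaf turns
 * up empty and needs fixing up by the caller.
 */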
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2899) static int __ocfs2_rotate_tree_left(handle_t *handle,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2900) struct ocfs2_extent_tree *et,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2901) int orig_credits,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2902) struct ocfs2_path *path,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2903) struct ocfs2_cached_dealloc_ctxt *dealloc,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2904) struct ocfs2_path **empty_extent_path)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2905) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2906) int ret, subtree_root, deleted;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2907) u32 right_cpos;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2908) struct ocfs2_path *left_path = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2909) struct ocfs2_path *right_path = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2910) struct super_block *sb = ocfs2_metadata_cache_get_super(et->et_ci);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2911)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2912) if (!ocfs2_is_empty_extent(&(path_leaf_el(path)->l_recs[0])))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2913) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2914)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2915) *empty_extent_path = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2916)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2917) ret = ocfs2_find_cpos_for_right_leaf(sb, path, &right_cpos);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2918) if (ret) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2919) mlog_errno(ret);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2920) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2921) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2922)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2923) left_path = ocfs2_new_path_from_path(path);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2924) if (!left_path) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2925) ret = -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2926) mlog_errno(ret);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2927) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2928) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2929)
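	/* Start with the caller's path as the left side of the first subtree. */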
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2930) ocfs2_cp_path(left_path, path);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2931)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2932) right_path = ocfs2_new_path_from_path(path);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2933) if (!right_path) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2934) ret = -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2935) mlog_errno(ret);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2936) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2937) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2938)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2939) while (right_cpos) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2940) ret = ocfs2_find_path(et->et_ci, right_path, right_cpos);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2941) if (ret) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2942) mlog_errno(ret);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2943) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2944) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2945)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2946) subtree_root = ocfs2_find_subtree_root(et, left_path,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2947) right_path);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2948)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2949) trace_ocfs2_rotate_subtree(subtree_root,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2950) (unsigned long long)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2951) right_path->p_node[subtree_root].bh->b_blocknr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2952) right_path->p_tree_depth);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2953)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2954) ret = ocfs2_extend_rotate_transaction(handle, 0,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2955) orig_credits, left_path);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2956) if (ret) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2957) mlog_errno(ret);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2958) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2959) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2960)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2961) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2962) * Caller might still want to make changes to the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2963) * tree root, so re-add it to the journal here.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2964) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2965) ret = ocfs2_path_bh_journal_access(handle, et->et_ci,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2966) left_path, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2967) if (ret) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2968) mlog_errno(ret);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2969) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2970) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2971)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2972) ret = ocfs2_rotate_subtree_left(handle, et, left_path,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2973) right_path, subtree_root,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2974) dealloc, &deleted);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2975) if (ret == -EAGAIN) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2976) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2977) * The rotation has to temporarily stop due to
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2978) * the right subtree having an empty
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2979) * extent. Pass it back to the caller for a
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2980) * fixup.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2981) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2982) *empty_extent_path = right_path;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2983) right_path = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2984) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2985) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2986) if (ret) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2987) mlog_errno(ret);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2988) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2989) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2990)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2991) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2992) * The subtree rotate might have removed records on
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2993) * the rightmost edge. If so, then rotation is
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2994) * complete.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2995) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2996) if (deleted)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2997) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2998)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2999) ocfs2_mv_path(left_path, right_path);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3000)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3001) ret = ocfs2_find_cpos_for_right_leaf(sb, left_path,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3002) &right_cpos);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3003) if (ret) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3004) mlog_errno(ret);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3005) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3006) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3007) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3008)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3009) out:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3010) ocfs2_free_path(right_path);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3011) ocfs2_free_path(left_path);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3012)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3013) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3014) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3015)
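/*
 * Remove the entire rightmost branch of the tree.  If a branch exists
 * to the left, it becomes the new rightmost one and its edge lengths
 * and last_eb_blk are updated; otherwise this was the only branch and
 * the root is reverted to an in-line (depth zero) extent list.
 */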
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3016) static int ocfs2_remove_rightmost_path(handle_t *handle,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3017) struct ocfs2_extent_tree *et,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3018) struct ocfs2_path *path,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3019) struct ocfs2_cached_dealloc_ctxt *dealloc)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3020) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3021) int ret, subtree_index;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3022) u32 cpos;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3023) struct ocfs2_path *left_path = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3024) struct ocfs2_extent_block *eb;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3025) struct ocfs2_extent_list *el;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3026)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3027) ret = ocfs2_et_sanity_check(et);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3028) if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3029) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3030)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3031) ret = ocfs2_journal_access_path(et->et_ci, handle, path);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3032) if (ret) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3033) mlog_errno(ret);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3034) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3035) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3036)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3037) ret = ocfs2_find_cpos_for_left_leaf(ocfs2_metadata_cache_get_super(et->et_ci),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3038) path, &cpos);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3039) if (ret) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3040) mlog_errno(ret);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3041) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3042) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3043)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3044) if (cpos) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3045) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3046) * We have a path to the left of this one - it needs
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3047) * an update too.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3048) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3049) left_path = ocfs2_new_path_from_path(path);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3050) if (!left_path) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3051) ret = -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3052) mlog_errno(ret);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3053) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3054) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3055)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3056) ret = ocfs2_find_path(et->et_ci, left_path, cpos);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3057) if (ret) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3058) mlog_errno(ret);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3059) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3060) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3061)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3062) ret = ocfs2_journal_access_path(et->et_ci, handle, left_path);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3063) if (ret) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3064) mlog_errno(ret);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3065) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3066) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3067)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3068) subtree_index = ocfs2_find_subtree_root(et, left_path, path);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3069)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3070) ocfs2_unlink_subtree(handle, et, left_path, path,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3071) subtree_index, dealloc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3072) ret = ocfs2_update_edge_lengths(handle, et, left_path);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3073) if (ret) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3074) mlog_errno(ret);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3075) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3076) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3077)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3078) eb = (struct ocfs2_extent_block *)path_leaf_bh(left_path)->b_data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3079) ocfs2_et_set_last_eb_blk(et, le64_to_cpu(eb->h_blkno));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3080) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3081) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3082) * 'path' is also the leftmost path which
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3083) * means it must be the only one. This gets
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3084) * handled differently because we want to
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3085) * revert the root back to having extents
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3086) * in-line.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3087) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3088) ocfs2_unlink_path(handle, et, dealloc, path, 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3089)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3090) el = et->et_root_el;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3091) el->l_tree_depth = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3092) el->l_next_free_rec = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3093) memset(&el->l_recs[0], 0, sizeof(struct ocfs2_extent_rec));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3094)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3095) ocfs2_et_set_last_eb_blk(et, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3096) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3097)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3098) ocfs2_journal_dirty(handle, path_root_bh(path));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3099)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3100) out:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3101) ocfs2_free_path(left_path);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3102) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3103) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3104)
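/*
 * As above, but start (and commit) a transaction of our own instead
 * of relying on the caller's handle.
 */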
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3105) static int ocfs2_remove_rightmost_empty_extent(struct ocfs2_super *osb,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3106) struct ocfs2_extent_tree *et,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3107) struct ocfs2_path *path,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3108) struct ocfs2_cached_dealloc_ctxt *dealloc)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3109) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3110) handle_t *handle;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3111) int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3112) int credits = path->p_tree_depth * 2 + 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3113)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3114) handle = ocfs2_start_trans(osb, credits);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3115) if (IS_ERR(handle)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3116) ret = PTR_ERR(handle);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3117) mlog_errno(ret);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3118) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3119) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3120)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3121) ret = ocfs2_remove_rightmost_path(handle, et, path, dealloc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3122) if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3123) mlog_errno(ret);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3124)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3125) ocfs2_commit_trans(osb, handle);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3126) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3127) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3128)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3129) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3130) * Left rotation of btree records.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3131) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3132) * In many ways, this is (unsurprisingly) the opposite of right
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3133) * rotation. We start at some non-rightmost path containing an empty
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3134) * extent in the leaf block. The code works its way to the rightmost
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3135) * path by rotating records to the left in every subtree.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3136) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3137) * This is used by any code which reduces the number of extent records
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3138) * in a leaf. After removal, an empty record should be placed in the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3139) * leftmost list position.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3140) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3141) * This won't handle a length update of the rightmost path records if
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3142) * the rightmost tree leaf record is removed, so the caller is
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3143) * responsible for detecting and correcting that.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3144) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3145) static int ocfs2_rotate_tree_left(handle_t *handle,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3146) struct ocfs2_extent_tree *et,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3147) struct ocfs2_path *path,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3148) struct ocfs2_cached_dealloc_ctxt *dealloc)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3149) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3150) int ret, orig_credits = jbd2_handle_buffer_credits(handle);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3151) struct ocfs2_path *tmp_path = NULL, *restart_path = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3152) struct ocfs2_extent_block *eb;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3153) struct ocfs2_extent_list *el;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3154)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3155) el = path_leaf_el(path);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3156) if (!ocfs2_is_empty_extent(&el->l_recs[0]))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3157) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3158)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3159) if (path->p_tree_depth == 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3160) rightmost_no_delete:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3161) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3162) * Inline extents, or a rightmost leaf that keeps records after
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3163) * the empty one is removed. Trivially handled, so do it up front.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3164) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3165) ret = ocfs2_rotate_rightmost_leaf_left(handle, et, path);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3166) if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3167) mlog_errno(ret);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3168) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3169) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3170)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3171) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3172) * Handle the rightmost branch now. There are several cases:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3173) * 1) simple rotation leaving records in there. That's trivial.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3174) * 2) rotation requiring a branch delete - there's no more
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3175) * records left. Two cases of this:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3176) * a) There are branches to the left.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3177) * b) This is also the leftmost (the only) branch.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3178) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3179) * 1) is handled via ocfs2_rotate_rightmost_leaf_left()
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3180) * 2a) we need the left branch so that we can update it with the unlink
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3181) * 2b) we need to bring the root back to inline extents.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3182) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3183)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3184) eb = (struct ocfs2_extent_block *)path_leaf_bh(path)->b_data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3185) el = &eb->h_list;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3186) if (eb->h_next_leaf_blk == 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3187) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3188) * This gets a bit tricky if we're going to delete the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3189) * rightmost path. Get the other cases out of the way
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3190) * 1st.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3191) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3192) if (le16_to_cpu(el->l_next_free_rec) > 1)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3193) goto rightmost_no_delete;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3194)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3195) if (le16_to_cpu(el->l_next_free_rec) == 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3196) ret = ocfs2_error(ocfs2_metadata_cache_get_super(et->et_ci),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3197) "Owner %llu has empty extent block at %llu\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3198) (unsigned long long)ocfs2_metadata_cache_owner(et->et_ci),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3199) (unsigned long long)le64_to_cpu(eb->h_blkno));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3200) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3201) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3202)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3203) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3204) * XXX: The caller cannot trust "path" any more after
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3205) * this, as it will have been deleted. What do we do?
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3206) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3207) * In theory the rotate-for-merge code will never get
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3208) * here because it'll always ask for a rotate in a
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3209) * nonempty list.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3210) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3211)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3212) ret = ocfs2_remove_rightmost_path(handle, et, path,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3213) dealloc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3214) if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3215) mlog_errno(ret);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3216) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3217) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3218)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3219) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3220) * Now we can loop, remembering the path we get from -EAGAIN
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3221) * and restarting from there.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3222) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3223) try_rotate:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3224) ret = __ocfs2_rotate_tree_left(handle, et, orig_credits, path,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3225) dealloc, &restart_path);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3226) if (ret && ret != -EAGAIN) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3227) mlog_errno(ret);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3228) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3229) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3230)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3231) while (ret == -EAGAIN) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3232) tmp_path = restart_path;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3233) restart_path = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3234)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3235) ret = __ocfs2_rotate_tree_left(handle, et, orig_credits,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3236) tmp_path, dealloc,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3237) &restart_path);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3238) if (ret && ret != -EAGAIN) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3239) mlog_errno(ret);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3240) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3241) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3242)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3243) ocfs2_free_path(tmp_path);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3244) tmp_path = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3245)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3246) if (ret == 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3247) goto try_rotate;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3248) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3249)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3250) out:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3251) ocfs2_free_path(tmp_path);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3252) ocfs2_free_path(restart_path);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3253) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3254) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3255)
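/*
 * Tidy up a leaf after a merge has emptied the merged-from record at
 * @index: shift the records in front of it one slot to the right so
 * that the resulting empty record lands in slot 0 (the only position
 * where an empty extent is allowed), and zero that slot so no stale
 * fields are left behind.
 */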
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3256) static void ocfs2_cleanup_merge(struct ocfs2_extent_list *el,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3257) int index)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3258) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3259) struct ocfs2_extent_rec *rec = &el->l_recs[index];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3260) unsigned int size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3261)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3262) if (rec->e_leaf_clusters == 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3263) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3264) * We consumed all of the merged-from record. An empty
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3265) * extent cannot exist anywhere but the 1st array
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3266) * position, so move things over if the merged-from
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3267) * record doesn't occupy that position.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3268) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3269) * This creates a new empty extent so the caller
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3270) * should be smart enough to have removed any existing
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3271) * ones.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3272) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3273) if (index > 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3274) BUG_ON(ocfs2_is_empty_extent(&el->l_recs[0]));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3275) size = index * sizeof(struct ocfs2_extent_rec);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3276) memmove(&el->l_recs[1], &el->l_recs[0], size);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3277) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3278)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3279) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3280) * Always memset - the caller doesn't check whether it
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3281) * created an empty extent, so there could be junk in
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3282) * the other fields.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3283) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3284) memset(&el->l_recs[0], 0, sizeof(struct ocfs2_extent_rec));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3285) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3286) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3287)
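/*
 * Look up the leaf path immediately to the right of @left_path and
 * return it through @ret_right_path. The caller owns the returned
 * path and must free it with ocfs2_free_path().
 */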
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3288) static int ocfs2_get_right_path(struct ocfs2_extent_tree *et,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3289) struct ocfs2_path *left_path,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3290) struct ocfs2_path **ret_right_path)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3291) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3292) int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3293) u32 right_cpos;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3294) struct ocfs2_path *right_path = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3295) struct ocfs2_extent_list *left_el;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3296)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3297) *ret_right_path = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3298)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3299) /* This function shouldn't be called for non-trees. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3300) BUG_ON(left_path->p_tree_depth == 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3301)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3302) left_el = path_leaf_el(left_path);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3303) BUG_ON(left_el->l_next_free_rec != left_el->l_count);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3304)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3305) ret = ocfs2_find_cpos_for_right_leaf(ocfs2_metadata_cache_get_super(et->et_ci),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3306) left_path, &right_cpos);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3307) if (ret) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3308) mlog_errno(ret);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3309) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3310) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3311)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3312) /* This function shouldn't be called for the rightmost leaf. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3313) BUG_ON(right_cpos == 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3314)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3315) right_path = ocfs2_new_path_from_path(left_path);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3316) if (!right_path) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3317) ret = -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3318) mlog_errno(ret);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3319) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3320) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3321)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3322) ret = ocfs2_find_path(et->et_ci, right_path, right_cpos);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3323) if (ret) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3324) mlog_errno(ret);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3325) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3326) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3327)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3328) *ret_right_path = right_path;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3329) out:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3330) if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3331) ocfs2_free_path(right_path);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3332) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3333) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3334)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3335) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3336) * Remove split_rec clusters from the record at index and merge them
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3337) * onto the beginning of the record "next" to it.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3338) * For index < l_count - 1, the "next" means the extent rec at index + 1.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3339) * For index == l_count - 1, the "next" means the 1st extent rec of the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3340) * next extent block.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3341) */
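/*
 * A small worked example with made-up numbers: if the record at @index
 * is { e_cpos = 100, e_leaf_clusters = 8 } and its neighbour to the
 * right is { e_cpos = 108, e_leaf_clusters = 4 }, then merging a
 * 2-cluster split_rec off the tail of the former leaves { 100, 6 } and
 * { 106, 6 }, with the right record's e_blkno pulled back by two
 * clusters' worth of blocks.
 */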
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3342) static int ocfs2_merge_rec_right(struct ocfs2_path *left_path,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3343) handle_t *handle,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3344) struct ocfs2_extent_tree *et,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3345) struct ocfs2_extent_rec *split_rec,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3346) int index)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3347) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3348) int ret, next_free, i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3349) unsigned int split_clusters = le16_to_cpu(split_rec->e_leaf_clusters);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3350) struct ocfs2_extent_rec *left_rec;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3351) struct ocfs2_extent_rec *right_rec;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3352) struct ocfs2_extent_list *right_el;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3353) struct ocfs2_path *right_path = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3354) int subtree_index = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3355) struct ocfs2_extent_list *el = path_leaf_el(left_path);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3356) struct buffer_head *bh = path_leaf_bh(left_path);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3357) struct buffer_head *root_bh = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3358)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3359) BUG_ON(index >= le16_to_cpu(el->l_next_free_rec));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3360) left_rec = &el->l_recs[index];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3361)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3362) if (index == le16_to_cpu(el->l_next_free_rec) - 1 &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3363) le16_to_cpu(el->l_next_free_rec) == le16_to_cpu(el->l_count)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3364) /* We have to handle a cross-extent-block merge. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3365) ret = ocfs2_get_right_path(et, left_path, &right_path);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3366) if (ret) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3367) mlog_errno(ret);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3368) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3369) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3370)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3371) right_el = path_leaf_el(right_path);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3372) next_free = le16_to_cpu(right_el->l_next_free_rec);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3373) BUG_ON(next_free <= 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3374) right_rec = &right_el->l_recs[0];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3375) if (ocfs2_is_empty_extent(right_rec)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3376) BUG_ON(next_free <= 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3377) right_rec = &right_el->l_recs[1];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3378) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3379)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3380) BUG_ON(le32_to_cpu(left_rec->e_cpos) +
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3381) le16_to_cpu(left_rec->e_leaf_clusters) !=
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3382) le32_to_cpu(right_rec->e_cpos));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3383)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3384) subtree_index = ocfs2_find_subtree_root(et, left_path,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3385) right_path);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3386)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3387) ret = ocfs2_extend_rotate_transaction(handle, subtree_index,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3388) jbd2_handle_buffer_credits(handle),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3389) right_path);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3390) if (ret) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3391) mlog_errno(ret);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3392) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3393) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3394)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3395) root_bh = left_path->p_node[subtree_index].bh;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3396) BUG_ON(root_bh != right_path->p_node[subtree_index].bh);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3397)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3398) ret = ocfs2_path_bh_journal_access(handle, et->et_ci, right_path,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3399) subtree_index);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3400) if (ret) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3401) mlog_errno(ret);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3402) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3403) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3404)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3405) for (i = subtree_index + 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3406) i < path_num_items(right_path); i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3407) ret = ocfs2_path_bh_journal_access(handle, et->et_ci,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3408) right_path, i);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3409) if (ret) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3410) mlog_errno(ret);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3411) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3412) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3413)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3414) ret = ocfs2_path_bh_journal_access(handle, et->et_ci,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3415) left_path, i);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3416) if (ret) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3417) mlog_errno(ret);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3418) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3419) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3420) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3421)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3422) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3423) BUG_ON(index == le16_to_cpu(el->l_next_free_rec) - 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3424) right_rec = &el->l_recs[index + 1];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3425) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3426)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3427) ret = ocfs2_path_bh_journal_access(handle, et->et_ci, left_path,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3428) path_num_items(left_path) - 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3429) if (ret) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3430) mlog_errno(ret);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3431) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3432) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3433)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3434) le16_add_cpu(&left_rec->e_leaf_clusters, -split_clusters);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3435)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3436) le32_add_cpu(&right_rec->e_cpos, -split_clusters);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3437) le64_add_cpu(&right_rec->e_blkno,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3438) -ocfs2_clusters_to_blocks(ocfs2_metadata_cache_get_super(et->et_ci),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3439) split_clusters));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3440) le16_add_cpu(&right_rec->e_leaf_clusters, split_clusters);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3441)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3442) ocfs2_cleanup_merge(el, index);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3443)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3444) ocfs2_journal_dirty(handle, bh);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3445) if (right_path) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3446) ocfs2_journal_dirty(handle, path_leaf_bh(right_path));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3447) ocfs2_complete_edge_insert(handle, left_path, right_path,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3448) subtree_index);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3449) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3450) out:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3451) ocfs2_free_path(right_path);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3452) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3453) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3454)
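/*
 * Look up the leaf path immediately to the left of @right_path and
 * return it through @ret_left_path. As with ocfs2_get_right_path(),
 * the caller owns the returned path and must free it with
 * ocfs2_free_path().
 */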
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3455) static int ocfs2_get_left_path(struct ocfs2_extent_tree *et,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3456) struct ocfs2_path *right_path,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3457) struct ocfs2_path **ret_left_path)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3458) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3459) int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3460) u32 left_cpos;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3461) struct ocfs2_path *left_path = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3462)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3463) *ret_left_path = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3464)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3465) /* This function shouldn't be called for non-trees. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3466) BUG_ON(right_path->p_tree_depth == 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3467)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3468) ret = ocfs2_find_cpos_for_left_leaf(ocfs2_metadata_cache_get_super(et->et_ci),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3469) right_path, &left_cpos);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3470) if (ret) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3471) mlog_errno(ret);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3472) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3473) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3474)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3475) /* This function shouldn't be called for the leftmost leaf. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3476) BUG_ON(left_cpos == 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3477)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3478) left_path = ocfs2_new_path_from_path(right_path);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3479) if (!left_path) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3480) ret = -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3481) mlog_errno(ret);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3482) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3483) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3484)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3485) ret = ocfs2_find_path(et->et_ci, left_path, left_cpos);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3486) if (ret) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3487) mlog_errno(ret);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3488) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3489) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3490)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3491) *ret_left_path = left_path;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3492) out:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3493) if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3494) ocfs2_free_path(left_path);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3495) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3496) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3497)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3498) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3499) * Remove split_rec clusters from the record at index and merge them
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3500) * onto the tail of the record "before" it.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3501) * For index > 0, the "before" means the extent rec at index - 1.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3502) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3503) * For index == 0, the "before" means the last record of the previous
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3504) * extent block. There is also a case where we may need to remove the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3505) * rightmost leaf extent block of the right_path and then re-point the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3506) * right path at the new rightmost path.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3507) */
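/*
 * A small worked example with made-up numbers: if the record before
 * @index is { e_cpos = 100, e_leaf_clusters = 6 } and the record at
 * @index is { e_cpos = 106, e_leaf_clusters = 6 }, then merging a
 * 2-cluster split_rec off the head of the latter leaves { 100, 8 } and
 * { 108, 4 }, with the right record's e_blkno advanced by two clusters'
 * worth of blocks.
 */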
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3508) static int ocfs2_merge_rec_left(struct ocfs2_path *right_path,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3509) handle_t *handle,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3510) struct ocfs2_extent_tree *et,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3511) struct ocfs2_extent_rec *split_rec,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3512) struct ocfs2_cached_dealloc_ctxt *dealloc,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3513) int index)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3514) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3515) int ret, i, subtree_index = 0, has_empty_extent = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3516) unsigned int split_clusters = le16_to_cpu(split_rec->e_leaf_clusters);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3517) struct ocfs2_extent_rec *left_rec;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3518) struct ocfs2_extent_rec *right_rec;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3519) struct ocfs2_extent_list *el = path_leaf_el(right_path);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3520) struct buffer_head *bh = path_leaf_bh(right_path);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3521) struct buffer_head *root_bh = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3522) struct ocfs2_path *left_path = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3523) struct ocfs2_extent_list *left_el;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3524)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3525) BUG_ON(index < 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3526)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3527) right_rec = &el->l_recs[index];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3528) if (index == 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3529) /* We have to handle a cross-extent-block merge. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3530) ret = ocfs2_get_left_path(et, right_path, &left_path);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3531) if (ret) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3532) mlog_errno(ret);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3533) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3534) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3535)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3536) left_el = path_leaf_el(left_path);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3537) BUG_ON(le16_to_cpu(left_el->l_next_free_rec) !=
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3538) le16_to_cpu(left_el->l_count));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3539)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3540) left_rec = &left_el->l_recs[
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3541) le16_to_cpu(left_el->l_next_free_rec) - 1];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3542) BUG_ON(le32_to_cpu(left_rec->e_cpos) +
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3543) le16_to_cpu(left_rec->e_leaf_clusters) !=
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3544) le32_to_cpu(split_rec->e_cpos));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3545)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3546) subtree_index = ocfs2_find_subtree_root(et, left_path,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3547) right_path);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3548)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3549) ret = ocfs2_extend_rotate_transaction(handle, subtree_index,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3550) jbd2_handle_buffer_credits(handle),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3551) left_path);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3552) if (ret) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3553) mlog_errno(ret);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3554) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3555) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3556)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3557) root_bh = left_path->p_node[subtree_index].bh;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3558) BUG_ON(root_bh != right_path->p_node[subtree_index].bh);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3559)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3560) ret = ocfs2_path_bh_journal_access(handle, et->et_ci, right_path,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3561) subtree_index);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3562) if (ret) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3563) mlog_errno(ret);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3564) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3565) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3566)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3567) for (i = subtree_index + 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3568) i < path_num_items(right_path); i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3569) ret = ocfs2_path_bh_journal_access(handle, et->et_ci,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3570) right_path, i);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3571) if (ret) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3572) mlog_errno(ret);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3573) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3574) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3575)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3576) ret = ocfs2_path_bh_journal_access(handle, et->et_ci,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3577) left_path, i);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3578) if (ret) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3579) mlog_errno(ret);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3580) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3581) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3582) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3583) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3584) left_rec = &el->l_recs[index - 1];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3585) if (ocfs2_is_empty_extent(&el->l_recs[0]))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3586) has_empty_extent = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3587) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3588)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3589) ret = ocfs2_path_bh_journal_access(handle, et->et_ci, right_path,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3590) path_num_items(right_path) - 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3591) if (ret) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3592) mlog_errno(ret);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3593) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3594) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3595)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3596) if (has_empty_extent && index == 1) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3597) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3598) * The easy case - we can just plop the record right in.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3599) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3600) *left_rec = *split_rec;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3601) } else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3602) le16_add_cpu(&left_rec->e_leaf_clusters, split_clusters);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3603)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3604) le32_add_cpu(&right_rec->e_cpos, split_clusters);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3605) le64_add_cpu(&right_rec->e_blkno,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3606) ocfs2_clusters_to_blocks(ocfs2_metadata_cache_get_super(et->et_ci),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3607) split_clusters));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3608) le16_add_cpu(&right_rec->e_leaf_clusters, -split_clusters);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3609)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3610) ocfs2_cleanup_merge(el, index);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3611)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3612) ocfs2_journal_dirty(handle, bh);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3613) if (left_path) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3614) ocfs2_journal_dirty(handle, path_leaf_bh(left_path));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3615)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3616) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3617) * If the right_rec is empty and the extent block is empty as
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3618) * well, ocfs2_complete_edge_insert() can't handle it, so we
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3619) * need to delete the right extent block.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3620) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3621) if (le16_to_cpu(right_rec->e_leaf_clusters) == 0 &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3622) le16_to_cpu(el->l_next_free_rec) == 1) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3623) /* extend credit for ocfs2_remove_rightmost_path */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3624) ret = ocfs2_extend_rotate_transaction(handle, 0,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3625) jbd2_handle_buffer_credits(handle),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3626) right_path);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3627) if (ret) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3628) mlog_errno(ret);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3629) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3630) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3631)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3632) ret = ocfs2_remove_rightmost_path(handle, et,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3633) right_path,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3634) dealloc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3635) if (ret) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3636) mlog_errno(ret);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3637) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3638) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3639)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3640) /* The rightmost extent block has now been deleted,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3641) * so switch to the new rightmost path.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3642) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3643) ocfs2_mv_path(right_path, left_path);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3644) left_path = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3645) } else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3646) ocfs2_complete_edge_insert(handle, left_path,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3647) right_path, subtree_index);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3648) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3649) out:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3650) ocfs2_free_path(left_path);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3651) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3652) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3653)
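/*
 * Try to merge the split record at @split_index into its neighbour(s),
 * as directed by ctxt->c_contig_type. A CONTIG_LEFTRIGHT merge combines
 * the record with both neighbours (merge right first, then left), while
 * CONTIG_LEFT or CONTIG_RIGHT folds it into a single neighbour. Any
 * empty extents produced along the way are rotated back out of the leaf.
 */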
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3654) static int ocfs2_try_to_merge_extent(handle_t *handle,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3655) struct ocfs2_extent_tree *et,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3656) struct ocfs2_path *path,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3657) int split_index,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3658) struct ocfs2_extent_rec *split_rec,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3659) struct ocfs2_cached_dealloc_ctxt *dealloc,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3660) struct ocfs2_merge_ctxt *ctxt)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3661) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3662) int ret = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3663) struct ocfs2_extent_list *el = path_leaf_el(path);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3664) struct ocfs2_extent_rec *rec = &el->l_recs[split_index];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3665)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3666) BUG_ON(ctxt->c_contig_type == CONTIG_NONE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3667)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3668) if (ctxt->c_split_covers_rec && ctxt->c_has_empty_extent) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3669) /* extend credit for ocfs2_remove_rightmost_path */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3670) ret = ocfs2_extend_rotate_transaction(handle, 0,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3671) jbd2_handle_buffer_credits(handle),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3672) path);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3673) if (ret) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3674) mlog_errno(ret);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3675) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3676) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3677) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3678) * The merge code will need to create an empty
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3679) * extent to take the place of the newly
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3680) * emptied slot. Remove any pre-existing empty
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3681) * extents - having more than one in a leaf is
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3682) * illegal.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3683) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3684) ret = ocfs2_rotate_tree_left(handle, et, path, dealloc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3685) if (ret) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3686) mlog_errno(ret);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3687) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3688) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3689) split_index--;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3690) rec = &el->l_recs[split_index];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3691) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3692)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3693) if (ctxt->c_contig_type == CONTIG_LEFTRIGHT) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3694) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3695) * Left-right contig implies this.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3696) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3697) BUG_ON(!ctxt->c_split_covers_rec);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3698)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3699) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3700) * Since the leftright insert always covers the entire
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3701) * extent, this call will delete the insert record
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3702) * entirely, resulting in an empty extent record added to
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3703) * the extent block.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3704) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3705) * Since the adding of an empty extent shifts
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3706) * everything back to the right, there's no need to
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3707) * update split_index here.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3708) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3709) * When the split_index is zero, we need to merge the record
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3710) * into the previous extent block. It is more efficient and
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3711) * easier if we do merge_right first and merge_left later.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3712) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3713) ret = ocfs2_merge_rec_right(path, handle, et, split_rec,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3714) split_index);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3715) if (ret) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3716) mlog_errno(ret);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3717) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3718) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3719)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3720) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3721) * We can only get here via a logic error above.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3722) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3723) BUG_ON(!ocfs2_is_empty_extent(&el->l_recs[0]));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3724)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3725) /* extend credit for ocfs2_remove_rightmost_path */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3726) ret = ocfs2_extend_rotate_transaction(handle, 0,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3727) jbd2_handle_buffer_credits(handle),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3728) path);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3729) if (ret) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3730) mlog_errno(ret);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3731) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3732) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3733)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3734) /* The merge left us with an empty extent, remove it. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3735) ret = ocfs2_rotate_tree_left(handle, et, path, dealloc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3736) if (ret) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3737) mlog_errno(ret);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3738) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3739) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3740)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3741) rec = &el->l_recs[split_index];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3742)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3743) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3744) * Note that we don't pass split_rec here on purpose -
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3745) * we've merged it into the rec already.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3746) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3747) ret = ocfs2_merge_rec_left(path, handle, et, rec,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3748) dealloc, split_index);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3749)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3750) if (ret) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3751) mlog_errno(ret);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3752) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3753) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3754)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3755) /* extend credit for ocfs2_remove_rightmost_path */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3756) ret = ocfs2_extend_rotate_transaction(handle, 0,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3757) jbd2_handle_buffer_credits(handle),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3758) path);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3759) if (ret) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3760) mlog_errno(ret);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3761) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3762) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3763)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3764) ret = ocfs2_rotate_tree_left(handle, et, path, dealloc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3765) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3766) * An error from this last rotate is not critical, so
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3767) * log it but don't bubble it up.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3768) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3769) if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3770) mlog_errno(ret);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3771) ret = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3772) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3773) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3774) * Merge a record to the left or right.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3775) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3776) * 'contig_type' is relative to the existing record, so,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3777) * for example, "right contig" means we merge into the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3778) * record on the left (hence the left merge).
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3779) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3780) if (ctxt->c_contig_type == CONTIG_RIGHT) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3781) ret = ocfs2_merge_rec_left(path, handle, et,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3782) split_rec, dealloc,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3783) split_index);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3784) if (ret) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3785) mlog_errno(ret);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3786) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3787) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3788) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3789) ret = ocfs2_merge_rec_right(path, handle,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3790) et, split_rec,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3791) split_index);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3792) if (ret) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3793) mlog_errno(ret);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3794) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3795) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3796) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3797)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3798) if (ctxt->c_split_covers_rec) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3799) /* extend credit for ocfs2_remove_rightmost_path */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3800) ret = ocfs2_extend_rotate_transaction(handle, 0,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3801) jbd2_handle_buffer_credits(handle),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3802) path);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3803) if (ret) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3804) mlog_errno(ret);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3805) ret = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3806) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3807) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3808)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3809) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3810) * The merge may have left an empty extent in
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3811) * our leaf. Try to rotate it away.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3812) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3813) ret = ocfs2_rotate_tree_left(handle, et, path,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3814) dealloc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3815) if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3816) mlog_errno(ret);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3817) ret = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3818) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3819) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3820)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3821) out:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3822) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3823) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3824)
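/*
 * Shrink @rec by the clusters described by @split_rec: a SPLIT_LEFT
 * split trims the region off the front of the record (advancing e_cpos
 * and e_blkno accordingly), otherwise the region is trimmed off the
 * tail.
 */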
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3825) static void ocfs2_subtract_from_rec(struct super_block *sb,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3826) enum ocfs2_split_type split,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3827) struct ocfs2_extent_rec *rec,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3828) struct ocfs2_extent_rec *split_rec)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3829) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3830) u64 len_blocks;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3831)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3832) len_blocks = ocfs2_clusters_to_blocks(sb,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3833) le16_to_cpu(split_rec->e_leaf_clusters));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3834)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3835) if (split == SPLIT_LEFT) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3836) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3837) * Region is on the left edge of the existing
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3838) * record.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3839) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3840) le32_add_cpu(&rec->e_cpos,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3841) le16_to_cpu(split_rec->e_leaf_clusters));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3842) le64_add_cpu(&rec->e_blkno, len_blocks);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3843) le16_add_cpu(&rec->e_leaf_clusters,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3844) -le16_to_cpu(split_rec->e_leaf_clusters));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3845) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3846) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3847) * Region is on the right edge of the existing
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3848) * record.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3849) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3850) le16_add_cpu(&rec->e_leaf_clusters,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3851) -le16_to_cpu(split_rec->e_leaf_clusters));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3852) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3853) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3854)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3855) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3856) * Do the final bits of extent record insertion at the target leaf
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3857) * list. If this leaf is part of an allocation tree, it is assumed
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3858) * that the tree above has been prepared.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3859) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3860) static void ocfs2_insert_at_leaf(struct ocfs2_extent_tree *et,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3861) struct ocfs2_extent_rec *insert_rec,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3862) struct ocfs2_extent_list *el,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3863) struct ocfs2_insert_type *insert)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3864) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3865) int i = insert->ins_contig_index;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3866) unsigned int range;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3867) struct ocfs2_extent_rec *rec;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3868)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3869) BUG_ON(le16_to_cpu(el->l_tree_depth) != 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3870)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3871) if (insert->ins_split != SPLIT_NONE) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3872) i = ocfs2_search_extent_list(el, le32_to_cpu(insert_rec->e_cpos));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3873) BUG_ON(i == -1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3874) rec = &el->l_recs[i];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3875) ocfs2_subtract_from_rec(ocfs2_metadata_cache_get_super(et->et_ci),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3876) insert->ins_split, rec,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3877) insert_rec);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3878) goto rotate;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3879) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3880)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3881) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3882) * Contiguous insert - either left or right.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3883) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3884) if (insert->ins_contig != CONTIG_NONE) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3885) rec = &el->l_recs[i];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3886) if (insert->ins_contig == CONTIG_LEFT) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3887) rec->e_blkno = insert_rec->e_blkno;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3888) rec->e_cpos = insert_rec->e_cpos;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3889) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3890) le16_add_cpu(&rec->e_leaf_clusters,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3891) le16_to_cpu(insert_rec->e_leaf_clusters));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3892) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3893) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3894)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3895) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3896) * Handle insert into an empty leaf.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3897) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3898) if (le16_to_cpu(el->l_next_free_rec) == 0 ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3899) ((le16_to_cpu(el->l_next_free_rec) == 1) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3900) ocfs2_is_empty_extent(&el->l_recs[0]))) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3901) el->l_recs[0] = *insert_rec;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3902) el->l_next_free_rec = cpu_to_le16(1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3903) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3904) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3905)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3906) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3907) * Appending insert.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3908) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3909) if (insert->ins_appending == APPEND_TAIL) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3910) i = le16_to_cpu(el->l_next_free_rec) - 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3911) rec = &el->l_recs[i];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3912) range = le32_to_cpu(rec->e_cpos)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3913) + le16_to_cpu(rec->e_leaf_clusters);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3914) BUG_ON(le32_to_cpu(insert_rec->e_cpos) < range);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3915)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3916) mlog_bug_on_msg(le16_to_cpu(el->l_next_free_rec) >=
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3917) le16_to_cpu(el->l_count),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3918) "owner %llu, depth %u, count %u, next free %u, "
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3919) "rec.cpos %u, rec.clusters %u, "
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3920) "insert.cpos %u, insert.clusters %u\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3921) ocfs2_metadata_cache_owner(et->et_ci),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3922) le16_to_cpu(el->l_tree_depth),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3923) le16_to_cpu(el->l_count),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3924) le16_to_cpu(el->l_next_free_rec),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3925) le32_to_cpu(el->l_recs[i].e_cpos),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3926) le16_to_cpu(el->l_recs[i].e_leaf_clusters),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3927) le32_to_cpu(insert_rec->e_cpos),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3928) le16_to_cpu(insert_rec->e_leaf_clusters));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3929) i++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3930) el->l_recs[i] = *insert_rec;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3931) le16_add_cpu(&el->l_next_free_rec, 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3932) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3933) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3934)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3935) rotate:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3936) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3937) * Ok, we have to rotate.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3938) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3939) * At this point, it is safe to assume that inserting into an
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3940) * empty leaf and appending to a leaf have both been handled
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3941) * above.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3942) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3943) * This leaf needs to have space, either via the empty 1st
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3944) * extent record, or by virtue of l_next_free_rec < l_count.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3945) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3946) ocfs2_rotate_leaf(el, insert_rec);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3947) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3948)
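/*
 * Walk the interior nodes of @path and stretch the last record of each
 * one so that its e_int_clusters covers @insert_rec, i.e. each record
 * now ends at insert_rec->e_cpos + insert_rec->e_leaf_clusters. The
 * leaf block itself is left for the caller to update.
 */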
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3949) static void ocfs2_adjust_rightmost_records(handle_t *handle,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3950) struct ocfs2_extent_tree *et,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3951) struct ocfs2_path *path,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3952) struct ocfs2_extent_rec *insert_rec)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3953) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3954) int i, next_free;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3955) struct buffer_head *bh;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3956) struct ocfs2_extent_list *el;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3957) struct ocfs2_extent_rec *rec;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3958)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3959) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3960) * Update everything except the leaf block.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3961) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3962) for (i = 0; i < path->p_tree_depth; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3963) bh = path->p_node[i].bh;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3964) el = path->p_node[i].el;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3965)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3966) next_free = le16_to_cpu(el->l_next_free_rec);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3967) if (next_free == 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3968) ocfs2_error(ocfs2_metadata_cache_get_super(et->et_ci),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3969) "Owner %llu has a bad extent list\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3970) (unsigned long long)ocfs2_metadata_cache_owner(et->et_ci));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3971) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3972) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3973)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3974) rec = &el->l_recs[next_free - 1];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3975)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3976) rec->e_int_clusters = insert_rec->e_cpos;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3977) le32_add_cpu(&rec->e_int_clusters,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3978) le16_to_cpu(insert_rec->e_leaf_clusters));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3979) le32_add_cpu(&rec->e_int_clusters,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3980) -le32_to_cpu(rec->e_cpos));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3981)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3982) ocfs2_journal_dirty(handle, bh);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3983) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3984) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3985)
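/*
 * Prepare the rightmost path for an appending insert of @insert_rec.
 * If the append lands in the leftmost slot of the leaf, the
 * neighbouring path to the left may also need its rightmost records
 * updated, so it is looked up and handed back through @ret_left_path.
 */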
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3986) static int ocfs2_append_rec_to_path(handle_t *handle,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3987) struct ocfs2_extent_tree *et,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3988) struct ocfs2_extent_rec *insert_rec,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3989) struct ocfs2_path *right_path,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3990) struct ocfs2_path **ret_left_path)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3991) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3992) int ret, next_free;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3993) struct ocfs2_extent_list *el;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3994) struct ocfs2_path *left_path = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3995)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3996) *ret_left_path = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3997)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3998) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3999) * This shouldn't happen for non-trees. The extent rec cluster
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4000) * count manipulation below only works for interior nodes.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4001) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4002) BUG_ON(right_path->p_tree_depth == 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4003)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4004) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4005) * If our appending insert is at the leftmost edge of a leaf,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4006) * then we might need to update the rightmost records of the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4007) * neighboring path.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4008) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4009) el = path_leaf_el(right_path);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4010) next_free = le16_to_cpu(el->l_next_free_rec);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4011) if (next_free == 0 ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4012) (next_free == 1 && ocfs2_is_empty_extent(&el->l_recs[0]))) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4013) u32 left_cpos;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4014)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4015) ret = ocfs2_find_cpos_for_left_leaf(ocfs2_metadata_cache_get_super(et->et_ci),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4016) right_path, &left_cpos);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4017) if (ret) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4018) mlog_errno(ret);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4019) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4020) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4021)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4022) trace_ocfs2_append_rec_to_path(
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4023) (unsigned long long)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4024) ocfs2_metadata_cache_owner(et->et_ci),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4025) le32_to_cpu(insert_rec->e_cpos),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4026) left_cpos);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4027)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4028) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4029) * No need to worry if the append is already in the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4030) * leftmost leaf.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4031) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4032) if (left_cpos) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4033) left_path = ocfs2_new_path_from_path(right_path);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4034) if (!left_path) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4035) ret = -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4036) mlog_errno(ret);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4037) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4038) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4039)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4040) ret = ocfs2_find_path(et->et_ci, left_path,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4041) left_cpos);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4042) if (ret) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4043) mlog_errno(ret);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4044) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4045) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4046)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4047) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4048) * ocfs2_insert_path() will pass the left_path to the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4049) * journal for us.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4050) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4051) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4052) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4053)
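/*
 * ocfs2_adjust_rightmost_records() below rewrites the last record of
 * every node above the leaf, so journal the whole right path first.
 */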
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4054) ret = ocfs2_journal_access_path(et->et_ci, handle, right_path);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4055) if (ret) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4056) mlog_errno(ret);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4057) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4058) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4059)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4060) ocfs2_adjust_rightmost_records(handle, et, right_path, insert_rec);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4061)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4062) *ret_left_path = left_path;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4063) ret = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4064) out:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4065) if (ret != 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4066) ocfs2_free_path(left_path);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4067)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4068) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4069) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4070)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4071) static void ocfs2_split_record(struct ocfs2_extent_tree *et,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4072) struct ocfs2_path *left_path,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4073) struct ocfs2_path *right_path,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4074) struct ocfs2_extent_rec *split_rec,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4075) enum ocfs2_split_type split)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4076) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4077) int index;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4078) u32 cpos = le32_to_cpu(split_rec->e_cpos);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4079) struct ocfs2_extent_list *left_el = NULL, *right_el, *insert_el, *el;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4080) struct ocfs2_extent_rec *rec, *tmprec;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4081)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4082) right_el = path_leaf_el(right_path);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4083) if (left_path)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4084) left_el = path_leaf_el(left_path);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4085)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4086) el = right_el;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4087) insert_el = right_el;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4088) index = ocfs2_search_extent_list(el, cpos);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4089) if (index != -1) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4090) if (index == 0 && left_path) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4091) BUG_ON(ocfs2_is_empty_extent(&el->l_recs[0]));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4092)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4093) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4094) * This typically means that the record
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4095) * started in the left path but moved to the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4096) * right as a result of rotation. We either
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4097) * move the existing record to the left, or we
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4098) * do the later insert there.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4099) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4100) * In this case, the left path should always
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4101) * exist as the rotate code will have passed
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4102) * it back for a post-insert update.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4103) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4104)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4105) if (split == SPLIT_LEFT) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4106) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4107) * It's a left split. Since we know
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4108) * that the rotate code gave us an
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4109) * empty extent in the left path, we
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4110) * can just do the insert there.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4111) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4112) insert_el = left_el;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4113) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4114) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4115) * Right split - we have to move the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4116) * existing record over to the left
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4117) * leaf. The insert will be into the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4118) * newly created empty extent in the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4119) * right leaf.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4120) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4121) tmprec = &right_el->l_recs[index];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4122) ocfs2_rotate_leaf(left_el, tmprec);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4123) el = left_el;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4124)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4125) memset(tmprec, 0, sizeof(*tmprec));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4126) index = ocfs2_search_extent_list(left_el, cpos);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4127) BUG_ON(index == -1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4128) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4129) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4130) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4131) BUG_ON(!left_path);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4132) BUG_ON(!ocfs2_is_empty_extent(&left_el->l_recs[0]));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4133) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4134) * Left path is easy - we can just allow the insert to
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4135) * happen.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4136) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4137) el = left_el;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4138) insert_el = left_el;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4139) index = ocfs2_search_extent_list(el, cpos);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4140) BUG_ON(index == -1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4141) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4142)
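/*
 * Trim the split range out of the existing record, then insert
 * split_rec into the leaf chosen above (insert_el).
 */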
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4143) rec = &el->l_recs[index];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4144) ocfs2_subtract_from_rec(ocfs2_metadata_cache_get_super(et->et_ci),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4145) split, rec, split_rec);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4146) ocfs2_rotate_leaf(insert_el, split_rec);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4147) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4148)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4149) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4150) * This function only does inserts on an allocation b-tree. For tree
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4151) * depth = 0, ocfs2_insert_at_leaf() is called directly.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4152) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4153) * right_path is the path we want to do the actual insert
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4154) * in. left_path should only be passed in if we need to update that
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4155) * portion of the tree after an edge insert.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4156) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4157) static int ocfs2_insert_path(handle_t *handle,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4158) struct ocfs2_extent_tree *et,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4159) struct ocfs2_path *left_path,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4160) struct ocfs2_path *right_path,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4161) struct ocfs2_extent_rec *insert_rec,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4162) struct ocfs2_insert_type *insert)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4163) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4164) int ret, subtree_index;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4165) struct buffer_head *leaf_bh = path_leaf_bh(right_path);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4166)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4167) if (left_path) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4168) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4169) * There's a chance that left_path got passed back to
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4170) * us without being accounted for in the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4171) * journal. Extend our transaction here to be sure we
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4172) * can change those blocks.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4173) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4174) ret = ocfs2_extend_trans(handle, left_path->p_tree_depth);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4175) if (ret < 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4176) mlog_errno(ret);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4177) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4178) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4179)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4180) ret = ocfs2_journal_access_path(et->et_ci, handle, left_path);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4181) if (ret < 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4182) mlog_errno(ret);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4183) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4184) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4185) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4186)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4187) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4188) * Pass both paths to the journal. The majority of inserts
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4189) * will be touching all components anyway.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4190) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4191) ret = ocfs2_journal_access_path(et->et_ci, handle, right_path);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4192) if (ret < 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4193) mlog_errno(ret);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4194) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4195) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4196)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4197) if (insert->ins_split != SPLIT_NONE) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4198) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4199) * We could call ocfs2_insert_at_leaf() for some types
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4200) * of splits, but it's easier to just let one separate
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4201) * function sort it all out.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4202) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4203) ocfs2_split_record(et, left_path, right_path,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4204) insert_rec, insert->ins_split);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4205)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4206) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4207) * Split might have modified either leaf and we don't
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4208) * have a guarantee that the later edge insert will
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4209) * dirty this for us.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4210) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4211) if (left_path)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4212) ocfs2_journal_dirty(handle,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4213) path_leaf_bh(left_path));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4214) } else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4215) ocfs2_insert_at_leaf(et, insert_rec, path_leaf_el(right_path),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4216) insert);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4217)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4218) ocfs2_journal_dirty(handle, leaf_bh);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4219)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4220) if (left_path) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4221) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4222) * The rotate code has indicated that we need to fix
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4223) * up portions of the tree after the insert.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4224) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4225) * XXX: Should we extend the transaction here?
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4226) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4227) subtree_index = ocfs2_find_subtree_root(et, left_path,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4228) right_path);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4229) ocfs2_complete_edge_insert(handle, left_path, right_path,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4230) subtree_index);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4231) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4232)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4233) ret = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4234) out:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4235) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4236) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4237)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4238) static int ocfs2_do_insert_extent(handle_t *handle,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4239) struct ocfs2_extent_tree *et,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4240) struct ocfs2_extent_rec *insert_rec,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4241) struct ocfs2_insert_type *type)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4242) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4243) int ret, rotate = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4244) u32 cpos;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4245) struct ocfs2_path *right_path = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4246) struct ocfs2_path *left_path = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4247) struct ocfs2_extent_list *el;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4248)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4249) el = et->et_root_el;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4250)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4251) ret = ocfs2_et_root_journal_access(handle, et,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4252) OCFS2_JOURNAL_ACCESS_WRITE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4253) if (ret) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4254) mlog_errno(ret);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4255) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4256) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4257)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4258) if (le16_to_cpu(el->l_tree_depth) == 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4259) ocfs2_insert_at_leaf(et, insert_rec, el, type);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4260) goto out_update_clusters;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4261) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4262)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4263) right_path = ocfs2_new_path_from_et(et);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4264) if (!right_path) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4265) ret = -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4266) mlog_errno(ret);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4267) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4268) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4269)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4270) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4271) * Determine the path to start with. Rotations need the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4272) * rightmost path, everything else can go directly to the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4273) * target leaf.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4274) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4275) cpos = le32_to_cpu(insert_rec->e_cpos);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4276) if (type->ins_appending == APPEND_NONE &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4277) type->ins_contig == CONTIG_NONE) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4278) rotate = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4279) cpos = UINT_MAX;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4280) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4281)
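/*
 * For a rotate we set cpos to UINT_MAX above, which makes
 * ocfs2_find_path() hand us the rightmost path in the tree.
 */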
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4282) ret = ocfs2_find_path(et->et_ci, right_path, cpos);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4283) if (ret) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4284) mlog_errno(ret);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4285) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4286) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4287)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4288) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4289) * Rotations and appends need special treatment - they modify
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4290) * parts of the tree above them.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4291) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4292) * Both might pass back a path immediately to the left of the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4293) * one being inserted to. This will cause
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4294) * ocfs2_insert_path() to modify the rightmost records of
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4295) * left_path to account for an edge insert.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4296) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4297) * XXX: When modifying this code, keep in mind that an insert
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4298) * can wind up skipping both of these two special cases...
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4299) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4300) if (rotate) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4301) ret = ocfs2_rotate_tree_right(handle, et, type->ins_split,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4302) le32_to_cpu(insert_rec->e_cpos),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4303) right_path, &left_path);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4304) if (ret) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4305) mlog_errno(ret);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4306) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4307) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4308)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4309) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4310) * ocfs2_rotate_tree_right() might have extended the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4311) * transaction without re-journaling our tree root.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4312) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4313) ret = ocfs2_et_root_journal_access(handle, et,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4314) OCFS2_JOURNAL_ACCESS_WRITE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4315) if (ret) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4316) mlog_errno(ret);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4317) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4318) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4319) } else if (type->ins_appending == APPEND_TAIL
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4320) && type->ins_contig != CONTIG_LEFT) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4321) ret = ocfs2_append_rec_to_path(handle, et, insert_rec,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4322) right_path, &left_path);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4323) if (ret) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4324) mlog_errno(ret);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4325) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4326) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4327) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4328)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4329) ret = ocfs2_insert_path(handle, et, left_path, right_path,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4330) insert_rec, type);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4331) if (ret) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4332) mlog_errno(ret);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4333) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4334) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4335)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4336) out_update_clusters:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4337) if (type->ins_split == SPLIT_NONE)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4338) ocfs2_et_update_clusters(et,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4339) le16_to_cpu(insert_rec->e_leaf_clusters));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4340)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4341) ocfs2_journal_dirty(handle, et->et_root_bh);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4342)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4343) out:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4344) ocfs2_free_path(left_path);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4345) ocfs2_free_path(right_path);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4346)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4347) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4348) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4349)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4350) static int ocfs2_figure_merge_contig_type(struct ocfs2_extent_tree *et,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4351) struct ocfs2_path *path,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4352) struct ocfs2_extent_list *el, int index,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4353) struct ocfs2_extent_rec *split_rec,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4354) struct ocfs2_merge_ctxt *ctxt)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4355) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4356) int status = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4357) enum ocfs2_contig_type ret = CONTIG_NONE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4358) u32 left_cpos, right_cpos;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4359) struct ocfs2_extent_rec *rec = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4360) struct ocfs2_extent_list *new_el;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4361) struct ocfs2_path *left_path = NULL, *right_path = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4362) struct buffer_head *bh;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4363) struct ocfs2_extent_block *eb;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4364) struct super_block *sb = ocfs2_metadata_cache_get_super(et->et_ci);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4365)
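/*
 * First find the record logically to the left of split_rec. If
 * split_rec sits in slot 0 of this leaf, that neighbor is the last
 * record of the adjacent leaf to the left, so read that leaf in.
 */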
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4366) if (index > 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4367) rec = &el->l_recs[index - 1];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4368) } else if (path->p_tree_depth > 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4369) status = ocfs2_find_cpos_for_left_leaf(sb, path, &left_cpos);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4370) if (status)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4371) goto exit;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4372)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4373) if (left_cpos != 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4374) left_path = ocfs2_new_path_from_path(path);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4375) if (!left_path) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4376) status = -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4377) mlog_errno(status);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4378) goto exit;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4379) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4380)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4381) status = ocfs2_find_path(et->et_ci, left_path,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4382) left_cpos);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4383) if (status)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4384) goto free_left_path;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4385)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4386) new_el = path_leaf_el(left_path);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4387)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4388) if (le16_to_cpu(new_el->l_next_free_rec) !=
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4389) le16_to_cpu(new_el->l_count)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4390) bh = path_leaf_bh(left_path);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4391) eb = (struct ocfs2_extent_block *)bh->b_data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4392) status = ocfs2_error(sb,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4393) "Extent block #%llu has an invalid l_next_free_rec of %d. It should have matched the l_count of %d\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4394) (unsigned long long)le64_to_cpu(eb->h_blkno),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4395) le16_to_cpu(new_el->l_next_free_rec),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4396) le16_to_cpu(new_el->l_count));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4397) goto free_left_path;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4398) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4399) rec = &new_el->l_recs[
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4400) le16_to_cpu(new_el->l_next_free_rec) - 1];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4401) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4402) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4403)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4404) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4405) * We're careful to check for an empty extent record here -
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4406) * the merge code will know what to do if it sees one.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4407) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4408) if (rec) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4409) if (index == 1 && ocfs2_is_empty_extent(rec)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4410) if (split_rec->e_cpos == el->l_recs[index].e_cpos)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4411) ret = CONTIG_RIGHT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4412) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4413) ret = ocfs2_et_extent_contig(et, rec, split_rec);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4414) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4415) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4416)
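/*
 * Now check the record to the right of split_rec - either the next
 * slot in this leaf, or the first non-empty record of the adjacent
 * leaf to the right when we're in the last slot of a full leaf.
 */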
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4417) rec = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4418) if (index < (le16_to_cpu(el->l_next_free_rec) - 1))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4419) rec = &el->l_recs[index + 1];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4420) else if (le16_to_cpu(el->l_next_free_rec) == le16_to_cpu(el->l_count) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4421) path->p_tree_depth > 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4422) status = ocfs2_find_cpos_for_right_leaf(sb, path, &right_cpos);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4423) if (status)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4424) goto free_left_path;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4425)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4426) if (right_cpos == 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4427) goto free_left_path;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4428)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4429) right_path = ocfs2_new_path_from_path(path);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4430) if (!right_path) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4431) status = -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4432) mlog_errno(status);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4433) goto free_left_path;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4434) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4435)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4436) status = ocfs2_find_path(et->et_ci, right_path, right_cpos);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4437) if (status)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4438) goto free_right_path;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4439)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4440) new_el = path_leaf_el(right_path);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4441) rec = &new_el->l_recs[0];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4442) if (ocfs2_is_empty_extent(rec)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4443) if (le16_to_cpu(new_el->l_next_free_rec) <= 1) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4444) bh = path_leaf_bh(right_path);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4445) eb = (struct ocfs2_extent_block *)bh->b_data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4446) status = ocfs2_error(sb,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4447) "Extent block #%llu has an invalid l_next_free_rec of %d\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4448) (unsigned long long)le64_to_cpu(eb->h_blkno),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4449) le16_to_cpu(new_el->l_next_free_rec));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4450) goto free_right_path;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4451) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4452) rec = &new_el->l_recs[1];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4453) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4454) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4455)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4456) if (rec) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4457) enum ocfs2_contig_type contig_type;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4458)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4459) contig_type = ocfs2_et_extent_contig(et, rec, split_rec);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4460)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4461) if (contig_type == CONTIG_LEFT && ret == CONTIG_RIGHT)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4462) ret = CONTIG_LEFTRIGHT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4463) else if (ret == CONTIG_NONE)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4464) ret = contig_type;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4465) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4466)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4467) free_right_path:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4468) ocfs2_free_path(right_path);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4469) free_left_path:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4470) ocfs2_free_path(left_path);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4471) exit:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4472) if (status == 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4473) ctxt->c_contig_type = ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4474)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4475) return status;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4476) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4477)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4478) static void ocfs2_figure_contig_type(struct ocfs2_extent_tree *et,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4479) struct ocfs2_insert_type *insert,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4480) struct ocfs2_extent_list *el,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4481) struct ocfs2_extent_rec *insert_rec)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4482) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4483) int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4484) enum ocfs2_contig_type contig_type = CONTIG_NONE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4485)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4486) BUG_ON(le16_to_cpu(el->l_tree_depth) != 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4487)
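/*
 * Scan the leaf for a record that the new extent can merge with and
 * remember the first match.
 */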
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4488) for(i = 0; i < le16_to_cpu(el->l_next_free_rec); i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4489) contig_type = ocfs2_et_extent_contig(et, &el->l_recs[i],
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4490) insert_rec);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4491) if (contig_type != CONTIG_NONE) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4492) insert->ins_contig_index = i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4493) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4494) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4495) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4496) insert->ins_contig = contig_type;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4497)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4498) if (insert->ins_contig != CONTIG_NONE) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4499) struct ocfs2_extent_rec *rec =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4500) &el->l_recs[insert->ins_contig_index];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4501) unsigned int len = le16_to_cpu(rec->e_leaf_clusters) +
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4502) le16_to_cpu(insert_rec->e_leaf_clusters);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4503)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4504) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4505) * The caller might want us to limit the size of extents, so don't
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4506) * report contiguousness if we might exceed that limit.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4507) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4508) if (et->et_max_leaf_clusters &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4509) (len > et->et_max_leaf_clusters))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4510) insert->ins_contig = CONTIG_NONE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4511) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4512) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4513)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4514) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4515) * This should only be called against the rightmost leaf extent list.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4516) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4517) * ocfs2_figure_appending_type() will figure out whether we'll have to
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4518) * insert at the tail of the rightmost leaf.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4519) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4520) * This should also work against the root extent list for trees with 0
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4521) * depth. If we consider the root extent list to be the rightmost leaf node
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4522) * then the logic here makes sense.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4523) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4524) static void ocfs2_figure_appending_type(struct ocfs2_insert_type *insert,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4525) struct ocfs2_extent_list *el,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4526) struct ocfs2_extent_rec *insert_rec)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4527) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4528) int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4529) u32 cpos = le32_to_cpu(insert_rec->e_cpos);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4530) struct ocfs2_extent_rec *rec;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4531)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4532) insert->ins_appending = APPEND_NONE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4533)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4534) BUG_ON(le16_to_cpu(el->l_tree_depth) != 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4535)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4536) if (!el->l_next_free_rec)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4537) goto set_tail_append;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4538)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4539) if (ocfs2_is_empty_extent(&el->l_recs[0])) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4540) /* Were all records empty? */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4541) if (le16_to_cpu(el->l_next_free_rec) == 1)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4542) goto set_tail_append;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4543) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4544)
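/*
 * Otherwise this is only a tail append if the new record starts at
 * or beyond the end of the current last record.
 */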
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4545) i = le16_to_cpu(el->l_next_free_rec) - 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4546) rec = &el->l_recs[i];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4547)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4548) if (cpos >=
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4549) (le32_to_cpu(rec->e_cpos) + le16_to_cpu(rec->e_leaf_clusters)))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4550) goto set_tail_append;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4551)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4552) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4553)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4554) set_tail_append:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4555) insert->ins_appending = APPEND_TAIL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4556) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4557)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4558) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4559) * Helper function called at the beginning of an insert.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4560) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4561) * This computes a few things that are commonly used in the process of
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4562) * inserting into the btree:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4563) * - Whether the new extent is contiguous with an existing one.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4564) * - The current tree depth.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4565) * - Whether the insert is an appending one.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4566) * - The number of free record slots in the rightmost leaf.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4567) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4568) * All of the information is stored on the ocfs2_insert_type
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4569) * structure.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4570) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4571) static int ocfs2_figure_insert_type(struct ocfs2_extent_tree *et,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4572) struct buffer_head **last_eb_bh,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4573) struct ocfs2_extent_rec *insert_rec,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4574) int *free_records,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4575) struct ocfs2_insert_type *insert)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4576) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4577) int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4578) struct ocfs2_extent_block *eb;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4579) struct ocfs2_extent_list *el;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4580) struct ocfs2_path *path = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4581) struct buffer_head *bh = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4582)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4583) insert->ins_split = SPLIT_NONE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4584)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4585) el = et->et_root_el;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4586) insert->ins_tree_depth = le16_to_cpu(el->l_tree_depth);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4587)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4588) if (el->l_tree_depth) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4589) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4590) * If we have tree depth, we read in the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4591) * rightmost extent block ahead of time as
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4592) * ocfs2_figure_insert_type() and ocfs2_add_branch()
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4593) * may want it later.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4594) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4595) ret = ocfs2_read_extent_block(et->et_ci,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4596) ocfs2_et_get_last_eb_blk(et),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4597) &bh);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4598) if (ret) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4599) mlog_errno(ret);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4600) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4601) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4602) eb = (struct ocfs2_extent_block *) bh->b_data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4603) el = &eb->h_list;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4604) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4605)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4606) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4607) * Unless we have a contiguous insert, we'll need to know if
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4608) * there is room left in our allocation tree for another
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4609) * extent record.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4610) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4611) * XXX: This test is simplistic, we can search for empty
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4612) * extent records too.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4613) */
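/*
 * el is either the root list (tree depth 0) or the rightmost leaf's
 * list read above, so this counts the free record slots available in
 * the rightmost leaf.
 */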
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4614) *free_records = le16_to_cpu(el->l_count) -
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4615) le16_to_cpu(el->l_next_free_rec);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4616)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4617) if (!insert->ins_tree_depth) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4618) ocfs2_figure_contig_type(et, insert, el, insert_rec);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4619) ocfs2_figure_appending_type(insert, el, insert_rec);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4620) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4621) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4622)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4623) path = ocfs2_new_path_from_et(et);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4624) if (!path) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4625) ret = -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4626) mlog_errno(ret);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4627) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4628) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4629)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4630) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4631) * In the case that we're inserting past what the tree
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4632) * currently accounts for, ocfs2_find_path() will return for
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4633) * us the rightmost tree path. This is accounted for below in
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4634) * the appending code.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4635) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4636) ret = ocfs2_find_path(et->et_ci, path, le32_to_cpu(insert_rec->e_cpos));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4637) if (ret) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4638) mlog_errno(ret);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4639) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4640) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4641)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4642) el = path_leaf_el(path);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4643)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4644) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4645) * Now that we have the path, there are two things we want to determine:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4646) * 1) Contiguousness (also set contig_index if this is so)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4647) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4648) * 2) Are we doing an append? We can trivially break this up
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4649) * into two types of appends: simple record append, or a
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4650) * rotate inside the tail leaf.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4651) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4652) ocfs2_figure_contig_type(et, insert, el, insert_rec);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4653)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4654) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4655) * The insert code isn't quite ready to deal with all cases of
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4656) * left contiguousness. Specifically, if it's an insert into
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4657) * the 1st record in a leaf, it will require the adjustment of
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4658) * cluster count on the last record of the path directly to its
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4659) * left. For now, just catch that case and fool the layers
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4660) * above us. This works just fine for tree_depth == 0, which
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4661) * is why we allow that above.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4662) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4663) if (insert->ins_contig == CONTIG_LEFT &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4664) insert->ins_contig_index == 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4665) insert->ins_contig = CONTIG_NONE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4666)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4667) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4668) * Ok, so we can simply compare against last_eb to figure out
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4669) * whether the target path doesn't exist. This will only happen in
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4670) * the case that we're doing a tail append, so maybe we can
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4671) * take advantage of that information somehow.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4672) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4673) if (ocfs2_et_get_last_eb_blk(et) ==
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4674) path_leaf_bh(path)->b_blocknr) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4675) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4676) * Ok, ocfs2_find_path() returned us the rightmost
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4677) * tree path. This might be an appending insert. There are
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4678) * two cases:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4679) * 1) We're doing a true append at the tail:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4680) * -This might even be off the end of the leaf
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4681) * 2) We're "appending" by rotating in the tail
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4682) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4683) ocfs2_figure_appending_type(insert, el, insert_rec);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4684) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4685)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4686) out:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4687) ocfs2_free_path(path);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4688)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4689) if (ret == 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4690) *last_eb_bh = bh;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4691) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4692) brelse(bh);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4693) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4694) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4695)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4696) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4697) * Insert an extent into a btree.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4698) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4699) * The caller needs to update the owning btree's cluster count.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4700) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4701) int ocfs2_insert_extent(handle_t *handle,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4702) struct ocfs2_extent_tree *et,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4703) u32 cpos,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4704) u64 start_blk,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4705) u32 new_clusters,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4706) u8 flags,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4707) struct ocfs2_alloc_context *meta_ac)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4708) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4709) int status;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4710) int free_records;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4711) struct buffer_head *last_eb_bh = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4712) struct ocfs2_insert_type insert = {0, };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4713) struct ocfs2_extent_rec rec;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4714)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4715) trace_ocfs2_insert_extent_start(
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4716) (unsigned long long)ocfs2_metadata_cache_owner(et->et_ci),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4717) cpos, new_clusters);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4718)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4719) memset(&rec, 0, sizeof(rec));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4720) rec.e_cpos = cpu_to_le32(cpos);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4721) rec.e_blkno = cpu_to_le64(start_blk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4722) rec.e_leaf_clusters = cpu_to_le16(new_clusters);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4723) rec.e_flags = flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4724) status = ocfs2_et_insert_check(et, &rec);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4725) if (status) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4726) mlog_errno(status);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4727) goto bail;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4728) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4729)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4730) status = ocfs2_figure_insert_type(et, &last_eb_bh, &rec,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4731) &free_records, &insert);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4732) if (status < 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4733) mlog_errno(status);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4734) goto bail;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4735) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4736)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4737) trace_ocfs2_insert_extent(insert.ins_appending, insert.ins_contig,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4738) insert.ins_contig_index, free_records,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4739) insert.ins_tree_depth);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4740)
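/*
 * Nothing to merge with and no free record slots left in the
 * rightmost leaf - grow the tree before doing the insert.
 */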
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4741) if (insert.ins_contig == CONTIG_NONE && free_records == 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4742) status = ocfs2_grow_tree(handle, et,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4743) &insert.ins_tree_depth, &last_eb_bh,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4744) meta_ac);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4745) if (status) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4746) mlog_errno(status);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4747) goto bail;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4748) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4749) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4750)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4751) /* Finally, we can add clusters. This might rotate the tree for us. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4752) status = ocfs2_do_insert_extent(handle, et, &rec, &insert);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4753) if (status < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4754) mlog_errno(status);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4755) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4756) ocfs2_et_extent_map_insert(et, &rec);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4757)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4758) bail:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4759) brelse(last_eb_bh);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4760)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4761) return status;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4762) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4763)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4764) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4765) * Allocate and add clusters into the extent b-tree.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4766) * The new clusters (clusters_to_add) will be inserted at logical_offset.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4767) * The extent b-tree's root is specified by et, and
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4768) * it is not limited to file storage. Any extent tree can use this
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4769) * function if it implements the proper ocfs2_extent_tree.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4770) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4771) int ocfs2_add_clusters_in_btree(handle_t *handle,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4772) struct ocfs2_extent_tree *et,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4773) u32 *logical_offset,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4774) u32 clusters_to_add,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4775) int mark_unwritten,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4776) struct ocfs2_alloc_context *data_ac,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4777) struct ocfs2_alloc_context *meta_ac,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4778) enum ocfs2_alloc_restarted *reason_ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4779) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4780) int status = 0, err = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4781) int need_free = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4782) int free_extents;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4783) enum ocfs2_alloc_restarted reason = RESTART_NONE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4784) u32 bit_off, num_bits;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4785) u64 block;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4786) u8 flags = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4787) struct ocfs2_super *osb =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4788) OCFS2_SB(ocfs2_metadata_cache_get_super(et->et_ci));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4789)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4790) BUG_ON(!clusters_to_add);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4791)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4792) if (mark_unwritten)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4793) flags = OCFS2_EXT_UNWRITTEN;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4794)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4795) free_extents = ocfs2_num_free_extents(et);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4796) if (free_extents < 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4797) status = free_extents;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4798) mlog_errno(status);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4799) goto leave;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4800) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4801)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4802) /* There are two cases which could cause us to return -EAGAIN in
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4803)  * the we-need-more-metadata case:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4804)  * 1) we haven't reserved *any* metadata, or
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4805)  * 2) we are so fragmented that we've needed to add metadata too
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4806)  *    many times. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4807) if (!free_extents && !meta_ac) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4808) err = -1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4809) status = -EAGAIN;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4810) reason = RESTART_META;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4811) goto leave;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4812) } else if ((!free_extents)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4813) && (ocfs2_alloc_context_bits_left(meta_ac)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4814) < ocfs2_extend_meta_needed(et->et_root_el))) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4815) err = -2;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4816) status = -EAGAIN;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4817) reason = RESTART_META;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4818) goto leave;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4819) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4820)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4821) status = __ocfs2_claim_clusters(handle, data_ac, 1,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4822) clusters_to_add, &bit_off, &num_bits);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4823) if (status < 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4824) if (status != -ENOSPC)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4825) mlog_errno(status);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4826) goto leave;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4827) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4828)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4829) BUG_ON(num_bits > clusters_to_add);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4830)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4831) /* reserve our write early -- insert_extent may update the tree root */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4832) status = ocfs2_et_root_journal_access(handle, et,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4833) OCFS2_JOURNAL_ACCESS_WRITE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4834) if (status < 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4835) mlog_errno(status);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4836) need_free = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4837) goto bail;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4838) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4839)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4840) block = ocfs2_clusters_to_blocks(osb->sb, bit_off);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4841) trace_ocfs2_add_clusters_in_btree(
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4842) (unsigned long long)ocfs2_metadata_cache_owner(et->et_ci),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4843) bit_off, num_bits);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4844) status = ocfs2_insert_extent(handle, et, *logical_offset, block,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4845) num_bits, flags, meta_ac);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4846) if (status < 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4847) mlog_errno(status);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4848) need_free = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4849) goto bail;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4850) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4851)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4852) ocfs2_journal_dirty(handle, et->et_root_bh);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4853)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4854) clusters_to_add -= num_bits;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4855) *logical_offset += num_bits;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4856)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4857) if (clusters_to_add) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4858) err = clusters_to_add;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4859) status = -EAGAIN;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4860) reason = RESTART_TRANS;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4861) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4862)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4863) bail:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4864) if (need_free) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4865) if (data_ac->ac_which == OCFS2_AC_USE_LOCAL)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4866) ocfs2_free_local_alloc_bits(osb, handle, data_ac,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4867) bit_off, num_bits);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4868) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4869) ocfs2_free_clusters(handle,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4870) data_ac->ac_inode,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4871) data_ac->ac_bh,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4872) ocfs2_clusters_to_blocks(osb->sb, bit_off),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4873) num_bits);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4874) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4875)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4876) leave:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4877) if (reason_ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4878) *reason_ret = reason;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4879) trace_ocfs2_add_clusters_in_btree_ret(status, reason, err);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4880) return status;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4881) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4882)
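/*
 * Build split_rec to describe the tail of rec: everything from cpos up
 * to the end of rec.  The block number is advanced by the clusters
 * skipped and the flags are carried over unchanged.
 */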
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4883) static void ocfs2_make_right_split_rec(struct super_block *sb,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4884) struct ocfs2_extent_rec *split_rec,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4885) u32 cpos,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4886) struct ocfs2_extent_rec *rec)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4887) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4888) u32 rec_cpos = le32_to_cpu(rec->e_cpos);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4889) u32 rec_range = rec_cpos + le16_to_cpu(rec->e_leaf_clusters);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4890)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4891) memset(split_rec, 0, sizeof(struct ocfs2_extent_rec));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4892)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4893) split_rec->e_cpos = cpu_to_le32(cpos);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4894) split_rec->e_leaf_clusters = cpu_to_le16(rec_range - cpos);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4895)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4896) split_rec->e_blkno = rec->e_blkno;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4897) le64_add_cpu(&split_rec->e_blkno,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4898) ocfs2_clusters_to_blocks(sb, cpos - rec_cpos));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4899)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4900) split_rec->e_flags = rec->e_flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4901) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4902)
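/*
 * Insert split_rec into the leaf pointed to by path by splitting the
 * record at split_index.  A range which touches neither edge of the
 * existing record takes two passes through the leftright loop below.
 * The tree is grown first if the rightmost extent list is full.
 */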
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4903) static int ocfs2_split_and_insert(handle_t *handle,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4904) struct ocfs2_extent_tree *et,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4905) struct ocfs2_path *path,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4906) struct buffer_head **last_eb_bh,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4907) int split_index,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4908) struct ocfs2_extent_rec *orig_split_rec,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4909) struct ocfs2_alloc_context *meta_ac)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4910) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4911) int ret = 0, depth;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4912) unsigned int insert_range, rec_range, do_leftright = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4913) struct ocfs2_extent_rec tmprec;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4914) struct ocfs2_extent_list *rightmost_el;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4915) struct ocfs2_extent_rec rec;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4916) struct ocfs2_extent_rec split_rec = *orig_split_rec;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4917) struct ocfs2_insert_type insert;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4918) struct ocfs2_extent_block *eb;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4919)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4920) leftright:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4921) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4922) * Store a copy of the record on the stack - it might move
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4923) * around as the tree is manipulated below.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4924) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4925) rec = path_leaf_el(path)->l_recs[split_index];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4926)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4927) rightmost_el = et->et_root_el;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4928)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4929) depth = le16_to_cpu(rightmost_el->l_tree_depth);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4930) if (depth) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4931) BUG_ON(!(*last_eb_bh));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4932) eb = (struct ocfs2_extent_block *) (*last_eb_bh)->b_data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4933) rightmost_el = &eb->h_list;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4934) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4935)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4936) if (le16_to_cpu(rightmost_el->l_next_free_rec) ==
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4937) le16_to_cpu(rightmost_el->l_count)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4938) ret = ocfs2_grow_tree(handle, et,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4939) &depth, last_eb_bh, meta_ac);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4940) if (ret) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4941) mlog_errno(ret);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4942) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4943) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4944) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4945)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4946) memset(&insert, 0, sizeof(struct ocfs2_insert_type));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4947) insert.ins_appending = APPEND_NONE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4948) insert.ins_contig = CONTIG_NONE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4949) insert.ins_tree_depth = depth;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4950)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4951) insert_range = le32_to_cpu(split_rec.e_cpos) +
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4952) le16_to_cpu(split_rec.e_leaf_clusters);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4953) rec_range = le32_to_cpu(rec.e_cpos) +
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4954) le16_to_cpu(rec.e_leaf_clusters);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4955)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4956) if (split_rec.e_cpos == rec.e_cpos) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4957) insert.ins_split = SPLIT_LEFT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4958) } else if (insert_range == rec_range) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4959) insert.ins_split = SPLIT_RIGHT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4960) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4961) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4962) * Left/right split. We fake this as a right split
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4963) * first and then make a second pass as a left split.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4964) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4965) insert.ins_split = SPLIT_RIGHT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4966)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4967) ocfs2_make_right_split_rec(ocfs2_metadata_cache_get_super(et->et_ci),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4968) &tmprec, insert_range, &rec);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4969)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4970) split_rec = tmprec;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4971)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4972) BUG_ON(do_leftright);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4973) do_leftright = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4974) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4975)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4976) ret = ocfs2_do_insert_extent(handle, et, &split_rec, &insert);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4977) if (ret) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4978) mlog_errno(ret);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4979) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4980) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4981)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4982) if (do_leftright == 1) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4983) u32 cpos;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4984) struct ocfs2_extent_list *el;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4985)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4986) do_leftright++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4987) split_rec = *orig_split_rec;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4988)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4989) ocfs2_reinit_path(path, 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4990)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4991) cpos = le32_to_cpu(split_rec.e_cpos);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4992) ret = ocfs2_find_path(et->et_ci, path, cpos);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4993) if (ret) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4994) mlog_errno(ret);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4995) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4996) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4997)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4998) el = path_leaf_el(path);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4999) split_index = ocfs2_search_extent_list(el, cpos);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5000) if (split_index == -1) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5001) ocfs2_error(ocfs2_metadata_cache_get_super(et->et_ci),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5002) "Owner %llu has an extent at cpos %u which can no longer be found\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5003) (unsigned long long)ocfs2_metadata_cache_owner(et->et_ci),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5004) cpos);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5005) ret = -EROFS;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5006) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5007) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5008) goto leftright;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5009) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5010) out:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5011)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5012) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5013) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5014)
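/*
 * Overwrite the record at split_index with *split_rec and dirty the
 * leaf.  Used by ocfs2_split_extent() when the new record covers the
 * existing one entirely and no merge is possible.
 */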
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5015) static int ocfs2_replace_extent_rec(handle_t *handle,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5016) struct ocfs2_extent_tree *et,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5017) struct ocfs2_path *path,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5018) struct ocfs2_extent_list *el,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5019) int split_index,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5020) struct ocfs2_extent_rec *split_rec)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5021) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5022) int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5023)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5024) ret = ocfs2_path_bh_journal_access(handle, et->et_ci, path,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5025) path_num_items(path) - 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5026) if (ret) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5027) mlog_errno(ret);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5028) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5029) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5030)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5031) el->l_recs[split_index] = *split_rec;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5032)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5033) ocfs2_journal_dirty(handle, path_leaf_bh(path));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5034) out:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5035) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5036) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5037)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5038) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5039) * Split part or all of the extent record at split_index in the leaf
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5040) * pointed to by path. Merge with the contiguous extent record if needed.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5041) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5042) * Care is taken to handle contiguousness so as to not grow the tree.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5043) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5044) * meta_ac is not strictly necessary - we only truly need it if growth
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5045) * of the tree is required. All other cases will degrade into a less
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5046) * optimal tree layout.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5047) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5048) * last_eb_bh should be the rightmost leaf block for any extent
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5049) * btree. Since a split may grow the tree or a merge might shrink it,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5050) * the caller cannot trust the contents of that buffer after this call.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5051) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5052) * This code is optimized for readability - several passes might be
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5053) * made over certain portions of the tree. All of those blocks will
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5054) * have been brought into cache (and pinned via the journal), so the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5055) * extra overhead is not expressed in terms of disk reads.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5056) */
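/*
 * Worked example (illustrative numbers): suppose the record at
 * split_index covers clusters [100, 120) and split_rec covers
 * [105, 110).  Neither edge is shared, so c_split_covers_rec stays 0
 * and, with no contiguous neighbour to merge into,
 * ocfs2_split_and_insert() first splits off the tail [110, 120) and
 * then splits [105, 110) out of the remaining [100, 110) record.  Had
 * split_rec covered all of [100, 120), the record would simply have
 * been replaced or merged.
 */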
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5057) int ocfs2_split_extent(handle_t *handle,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5058) struct ocfs2_extent_tree *et,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5059) struct ocfs2_path *path,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5060) int split_index,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5061) struct ocfs2_extent_rec *split_rec,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5062) struct ocfs2_alloc_context *meta_ac,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5063) struct ocfs2_cached_dealloc_ctxt *dealloc)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5064) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5065) int ret = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5066) struct ocfs2_extent_list *el = path_leaf_el(path);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5067) struct buffer_head *last_eb_bh = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5068) struct ocfs2_extent_rec *rec = &el->l_recs[split_index];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5069) struct ocfs2_merge_ctxt ctxt;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5070)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5071) if (le32_to_cpu(rec->e_cpos) > le32_to_cpu(split_rec->e_cpos) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5072) ((le32_to_cpu(rec->e_cpos) + le16_to_cpu(rec->e_leaf_clusters)) <
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5073) (le32_to_cpu(split_rec->e_cpos) + le16_to_cpu(split_rec->e_leaf_clusters)))) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5074) ret = -EIO;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5075) mlog_errno(ret);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5076) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5077) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5078)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5079) ret = ocfs2_figure_merge_contig_type(et, path, el,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5080) split_index,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5081) split_rec,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5082) &ctxt);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5083) if (ret) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5084) mlog_errno(ret);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5085) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5086) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5087)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5088) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5089) * The core merge / split code wants to know how much room is
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5090) * left in this allocation tree, so we pass the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5091) * rightmost extent list.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5092) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5093) if (path->p_tree_depth) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5094) ret = ocfs2_read_extent_block(et->et_ci,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5095) ocfs2_et_get_last_eb_blk(et),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5096) &last_eb_bh);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5097) if (ret) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5098) mlog_errno(ret);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5099) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5100) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5101) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5102)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5103) if (rec->e_cpos == split_rec->e_cpos &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5104) rec->e_leaf_clusters == split_rec->e_leaf_clusters)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5105) ctxt.c_split_covers_rec = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5106) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5107) ctxt.c_split_covers_rec = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5108)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5109) ctxt.c_has_empty_extent = ocfs2_is_empty_extent(&el->l_recs[0]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5110)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5111) trace_ocfs2_split_extent(split_index, ctxt.c_contig_type,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5112) ctxt.c_has_empty_extent,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5113) ctxt.c_split_covers_rec);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5114)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5115) if (ctxt.c_contig_type == CONTIG_NONE) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5116) if (ctxt.c_split_covers_rec)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5117) ret = ocfs2_replace_extent_rec(handle, et, path, el,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5118) split_index, split_rec);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5119) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5120) ret = ocfs2_split_and_insert(handle, et, path,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5121) &last_eb_bh, split_index,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5122) split_rec, meta_ac);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5123) if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5124) mlog_errno(ret);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5125) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5126) ret = ocfs2_try_to_merge_extent(handle, et, path,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5127) split_index, split_rec,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5128) dealloc, &ctxt);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5129) if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5130) mlog_errno(ret);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5131) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5132)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5133) out:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5134) brelse(last_eb_bh);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5135) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5136) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5137)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5138) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5139) * Change the flags of the already-existing extent at cpos for len clusters.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5140) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5141) * new_flags: the flags we want to set.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5142) * clear_flags: the flags we want to clear.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5143) * phys: the new physical offset we want this new extent to start from.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5144) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5145) * If the existing extent is larger than the request, initiate a
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5146) * split. An attempt will be made at merging with adjacent extents.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5147) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5148) * The caller is responsible for passing down meta_ac if we'll need it.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5149) */
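/*
 * Illustrative call (hypothetical caller context): to flag an existing
 * extent of len clusters at cpos as refcounted, a caller could do
 *
 *	ret = ocfs2_change_extent_flag(handle, et, cpos, len, phys,
 *				       meta_ac, dealloc,
 *				       OCFS2_EXT_REFCOUNTED, 0);
 *
 * ocfs2_mark_extent_written() below is the complementary case, passing
 * OCFS2_EXT_UNWRITTEN as clear_flags.
 */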
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5150) int ocfs2_change_extent_flag(handle_t *handle,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5151) struct ocfs2_extent_tree *et,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5152) u32 cpos, u32 len, u32 phys,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5153) struct ocfs2_alloc_context *meta_ac,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5154) struct ocfs2_cached_dealloc_ctxt *dealloc,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5155) int new_flags, int clear_flags)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5156) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5157) int ret, index;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5158) struct super_block *sb = ocfs2_metadata_cache_get_super(et->et_ci);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5159) u64 start_blkno = ocfs2_clusters_to_blocks(sb, phys);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5160) struct ocfs2_extent_rec split_rec;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5161) struct ocfs2_path *left_path = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5162) struct ocfs2_extent_list *el;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5163) struct ocfs2_extent_rec *rec;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5164)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5165) left_path = ocfs2_new_path_from_et(et);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5166) if (!left_path) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5167) ret = -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5168) mlog_errno(ret);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5169) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5170) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5171)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5172) ret = ocfs2_find_path(et->et_ci, left_path, cpos);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5173) if (ret) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5174) mlog_errno(ret);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5175) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5176) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5177) el = path_leaf_el(left_path);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5178)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5179) index = ocfs2_search_extent_list(el, cpos);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5180) if (index == -1) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5181) ocfs2_error(sb,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5182) "Owner %llu has an extent at cpos %u which can no longer be found\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5183) (unsigned long long)ocfs2_metadata_cache_owner(et->et_ci),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5184) cpos);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5185) ret = -EROFS;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5186) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5187) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5188)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5189) ret = -EIO;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5190) rec = &el->l_recs[index];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5191) if (new_flags && (rec->e_flags & new_flags)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5192) mlog(ML_ERROR, "Owner %llu tried to set %d flags on an "
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5193) "extent that already had them\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5194) (unsigned long long)ocfs2_metadata_cache_owner(et->et_ci),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5195) new_flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5196) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5197) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5198)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5199) if (clear_flags && !(rec->e_flags & clear_flags)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5200) mlog(ML_ERROR, "Owner %llu tried to clear %d flags on an "
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5201) "extent that didn't have them\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5202) (unsigned long long)ocfs2_metadata_cache_owner(et->et_ci),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5203) clear_flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5204) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5205) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5206)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5207) memset(&split_rec, 0, sizeof(struct ocfs2_extent_rec));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5208) split_rec.e_cpos = cpu_to_le32(cpos);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5209) split_rec.e_leaf_clusters = cpu_to_le16(len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5210) split_rec.e_blkno = cpu_to_le64(start_blkno);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5211) split_rec.e_flags = rec->e_flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5212) if (new_flags)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5213) split_rec.e_flags |= new_flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5214) if (clear_flags)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5215) split_rec.e_flags &= ~clear_flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5216)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5217) ret = ocfs2_split_extent(handle, et, left_path,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5218) index, &split_rec, meta_ac,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5219) dealloc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5220) if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5221) mlog_errno(ret);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5222)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5223) out:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5224) ocfs2_free_path(left_path);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5225) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5226)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5227) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5228)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5229) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5230) * Mark the already-existing extent at cpos as written for len clusters.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5231) * This removes the unwritten extent flag.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5232) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5233) * If the existing extent is larger than the request, initiate a
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5234) * split. An attempt will be made at merging with adjacent extents.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5235) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5236) * The caller is responsible for passing down meta_ac if we'll need it.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5237) */
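/*
 * Example (illustrative numbers): an unwritten record covering clusters
 * [0, 16) that has just had clusters [4, 8) written becomes three
 * records: [0, 4) unwritten, [4, 8) written and [8, 16) unwritten, with
 * a merge into adjacent written extents attempted where possible.
 */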
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5238) int ocfs2_mark_extent_written(struct inode *inode,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5239) struct ocfs2_extent_tree *et,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5240) handle_t *handle, u32 cpos, u32 len, u32 phys,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5241) struct ocfs2_alloc_context *meta_ac,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5242) struct ocfs2_cached_dealloc_ctxt *dealloc)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5243) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5244) int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5245)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5246) trace_ocfs2_mark_extent_written(
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5247) (unsigned long long)OCFS2_I(inode)->ip_blkno,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5248) cpos, len, phys);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5249)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5250) if (!ocfs2_writes_unwritten_extents(OCFS2_SB(inode->i_sb))) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5251) ocfs2_error(inode->i_sb, "Inode %llu has unwritten extents that are being written to, but the feature bit is not set in the super block\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5252) (unsigned long long)OCFS2_I(inode)->ip_blkno);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5253) ret = -EROFS;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5254) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5255) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5256)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5257) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5258) * XXX: This should be fixed up so that we just re-insert the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5259) * next extent records.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5260) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5261) ocfs2_et_extent_map_truncate(et, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5262)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5263) ret = ocfs2_change_extent_flag(handle, et, cpos,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5264) len, phys, meta_ac, dealloc,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5265) 0, OCFS2_EXT_UNWRITTEN);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5266) if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5267) mlog_errno(ret);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5268)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5269) out:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5270) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5271) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5272)
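/*
 * Split the record at index so that the clusters from new_range onward
 * get a record of their own, growing the tree first if the rightmost
 * extent list is already full.
 */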
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5273) static int ocfs2_split_tree(handle_t *handle, struct ocfs2_extent_tree *et,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5274) struct ocfs2_path *path,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5275) int index, u32 new_range,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5276) struct ocfs2_alloc_context *meta_ac)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5277) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5278) int ret, depth, credits;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5279) struct buffer_head *last_eb_bh = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5280) struct ocfs2_extent_block *eb;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5281) struct ocfs2_extent_list *rightmost_el, *el;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5282) struct ocfs2_extent_rec split_rec;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5283) struct ocfs2_extent_rec *rec;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5284) struct ocfs2_insert_type insert;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5285)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5286) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5287) * Setup the record to split before we grow the tree.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5288) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5289) el = path_leaf_el(path);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5290) rec = &el->l_recs[index];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5291) ocfs2_make_right_split_rec(ocfs2_metadata_cache_get_super(et->et_ci),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5292) &split_rec, new_range, rec);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5293)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5294) depth = path->p_tree_depth;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5295) if (depth > 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5296) ret = ocfs2_read_extent_block(et->et_ci,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5297) ocfs2_et_get_last_eb_blk(et),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5298) &last_eb_bh);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5299) if (ret < 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5300) mlog_errno(ret);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5301) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5302) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5303)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5304) eb = (struct ocfs2_extent_block *) last_eb_bh->b_data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5305) rightmost_el = &eb->h_list;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5306) } else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5307) rightmost_el = path_leaf_el(path);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5308)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5309) credits = path->p_tree_depth +
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5310) ocfs2_extend_meta_needed(et->et_root_el);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5311) ret = ocfs2_extend_trans(handle, credits);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5312) if (ret) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5313) mlog_errno(ret);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5314) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5315) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5316)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5317) if (le16_to_cpu(rightmost_el->l_next_free_rec) ==
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5318) le16_to_cpu(rightmost_el->l_count)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5319) ret = ocfs2_grow_tree(handle, et, &depth, &last_eb_bh,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5320) meta_ac);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5321) if (ret) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5322) mlog_errno(ret);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5323) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5324) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5325) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5326)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5327) memset(&insert, 0, sizeof(struct ocfs2_insert_type));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5328) insert.ins_appending = APPEND_NONE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5329) insert.ins_contig = CONTIG_NONE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5330) insert.ins_split = SPLIT_RIGHT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5331) insert.ins_tree_depth = depth;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5332)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5333) ret = ocfs2_do_insert_extent(handle, et, &split_rec, &insert);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5334) if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5335) mlog_errno(ret);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5336)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5337) out:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5338) brelse(last_eb_bh);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5339) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5340) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5341)
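/*
 * Remove the clusters in [cpos, cpos + len) from the record at index.
 * The range must cover the whole record or share one of its edges;
 * anything else hits the BUG() below because the caller is expected to
 * split interior ranges first (see ocfs2_remove_extent()).  The leaf is
 * rotated left afterwards to fill any hole left behind.
 */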
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5342) static int ocfs2_truncate_rec(handle_t *handle,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5343) struct ocfs2_extent_tree *et,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5344) struct ocfs2_path *path, int index,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5345) struct ocfs2_cached_dealloc_ctxt *dealloc,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5346) u32 cpos, u32 len)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5347) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5348) int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5349) u32 left_cpos, rec_range, trunc_range;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5350) int is_rightmost_tree_rec = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5351) struct super_block *sb = ocfs2_metadata_cache_get_super(et->et_ci);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5352) struct ocfs2_path *left_path = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5353) struct ocfs2_extent_list *el = path_leaf_el(path);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5354) struct ocfs2_extent_rec *rec;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5355) struct ocfs2_extent_block *eb;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5356)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5357) if (ocfs2_is_empty_extent(&el->l_recs[0]) && index > 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5358) /* extend credit for ocfs2_remove_rightmost_path */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5359) ret = ocfs2_extend_rotate_transaction(handle, 0,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5360) jbd2_handle_buffer_credits(handle),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5361) path);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5362) if (ret) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5363) mlog_errno(ret);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5364) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5365) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5366)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5367) ret = ocfs2_rotate_tree_left(handle, et, path, dealloc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5368) if (ret) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5369) mlog_errno(ret);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5370) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5371) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5372)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5373) index--;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5374) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5375)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5376) if (index == (le16_to_cpu(el->l_next_free_rec) - 1) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5377) path->p_tree_depth) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5378) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5379) * Check whether this is the rightmost tree record. If
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5380) * we remove all of this record or part of its right
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5381) * edge then an update of the record lengths above it
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5382) * will be required.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5383) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5384) eb = (struct ocfs2_extent_block *)path_leaf_bh(path)->b_data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5385) if (eb->h_next_leaf_blk == 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5386) is_rightmost_tree_rec = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5387) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5388)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5389) rec = &el->l_recs[index];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5390) if (index == 0 && path->p_tree_depth &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5391) le32_to_cpu(rec->e_cpos) == cpos) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5392) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5393) * Changing the leftmost offset (via partial or whole
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5394) * record truncate) of an interior (or rightmost) path
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5395) * means we have to update the subtree that is formed
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5396) * by this leaf and the one to its left.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5397) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5398) * There are two cases we can skip:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5399) * 1) Path is the leftmost one in our btree.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5400) * 2) The leaf is rightmost and will be empty after
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5401) * we remove the extent record - the rotate code
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5402) * knows how to update the newly formed edge.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5403) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5404)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5405) ret = ocfs2_find_cpos_for_left_leaf(sb, path, &left_cpos);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5406) if (ret) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5407) mlog_errno(ret);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5408) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5409) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5410)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5411) if (left_cpos && le16_to_cpu(el->l_next_free_rec) > 1) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5412) left_path = ocfs2_new_path_from_path(path);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5413) if (!left_path) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5414) ret = -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5415) mlog_errno(ret);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5416) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5417) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5418)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5419) ret = ocfs2_find_path(et->et_ci, left_path,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5420) left_cpos);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5421) if (ret) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5422) mlog_errno(ret);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5423) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5424) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5425) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5426) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5427)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5428) ret = ocfs2_extend_rotate_transaction(handle, 0,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5429) jbd2_handle_buffer_credits(handle),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5430) path);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5431) if (ret) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5432) mlog_errno(ret);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5433) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5434) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5435)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5436) ret = ocfs2_journal_access_path(et->et_ci, handle, path);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5437) if (ret) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5438) mlog_errno(ret);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5439) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5440) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5441)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5442) ret = ocfs2_journal_access_path(et->et_ci, handle, left_path);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5443) if (ret) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5444) mlog_errno(ret);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5445) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5446) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5447)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5448) rec_range = le32_to_cpu(rec->e_cpos) + ocfs2_rec_clusters(el, rec);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5449) trunc_range = cpos + len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5450)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5451) if (le32_to_cpu(rec->e_cpos) == cpos && rec_range == trunc_range) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5452) int next_free;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5453)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5454) memset(rec, 0, sizeof(*rec));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5455) ocfs2_cleanup_merge(el, index);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5456)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5457) next_free = le16_to_cpu(el->l_next_free_rec);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5458) if (is_rightmost_tree_rec && next_free > 1) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5459) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5460) * We skip the edge update if this path will
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5461) * be deleted by the rotate code.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5462) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5463) rec = &el->l_recs[next_free - 1];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5464) ocfs2_adjust_rightmost_records(handle, et, path,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5465) rec);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5466) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5467) } else if (le32_to_cpu(rec->e_cpos) == cpos) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5468) /* Remove leftmost portion of the record. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5469) le32_add_cpu(&rec->e_cpos, len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5470) le64_add_cpu(&rec->e_blkno, ocfs2_clusters_to_blocks(sb, len));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5471) le16_add_cpu(&rec->e_leaf_clusters, -len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5472) } else if (rec_range == trunc_range) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5473) /* Remove rightmost portion of the record */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5474) le16_add_cpu(&rec->e_leaf_clusters, -len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5475) if (is_rightmost_tree_rec)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5476) ocfs2_adjust_rightmost_records(handle, et, path, rec);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5477) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5478) /* Caller should have trapped this. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5479) mlog(ML_ERROR, "Owner %llu: Invalid record truncate: (%u, %u) "
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5480) "(%u, %u)\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5481) (unsigned long long)ocfs2_metadata_cache_owner(et->et_ci),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5482) le32_to_cpu(rec->e_cpos),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5483) le16_to_cpu(rec->e_leaf_clusters), cpos, len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5484) BUG();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5485) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5486)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5487) if (left_path) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5488) int subtree_index;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5489)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5490) subtree_index = ocfs2_find_subtree_root(et, left_path, path);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5491) ocfs2_complete_edge_insert(handle, left_path, path,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5492) subtree_index);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5493) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5494)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5495) ocfs2_journal_dirty(handle, path_leaf_bh(path));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5496)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5497) ret = ocfs2_rotate_tree_left(handle, et, path, dealloc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5498) if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5499) mlog_errno(ret);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5500)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5501) out:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5502) ocfs2_free_path(left_path);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5503) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5504) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5505)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5506) int ocfs2_remove_extent(handle_t *handle,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5507) struct ocfs2_extent_tree *et,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5508) u32 cpos, u32 len,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5509) struct ocfs2_alloc_context *meta_ac,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5510) struct ocfs2_cached_dealloc_ctxt *dealloc)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5511) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5512) int ret, index;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5513) u32 rec_range, trunc_range;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5514) struct ocfs2_extent_rec *rec;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5515) struct ocfs2_extent_list *el;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5516) struct ocfs2_path *path = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5517)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5518) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5519) * XXX: Why are we truncating to 0 instead of wherever this
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5520) * affects us?
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5521) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5522) ocfs2_et_extent_map_truncate(et, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5523)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5524) path = ocfs2_new_path_from_et(et);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5525) if (!path) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5526) ret = -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5527) mlog_errno(ret);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5528) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5529) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5530)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5531) ret = ocfs2_find_path(et->et_ci, path, cpos);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5532) if (ret) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5533) mlog_errno(ret);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5534) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5535) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5536)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5537) el = path_leaf_el(path);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5538) index = ocfs2_search_extent_list(el, cpos);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5539) if (index == -1) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5540) ocfs2_error(ocfs2_metadata_cache_get_super(et->et_ci),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5541) "Owner %llu has an extent at cpos %u which can no longer be found\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5542) (unsigned long long)ocfs2_metadata_cache_owner(et->et_ci),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5543) cpos);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5544) ret = -EROFS;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5545) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5546) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5547)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5548) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5549) * We have 3 cases of extent removal:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5550) * 1) Range covers the entire extent rec
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5551) * 2) Range begins or ends on one edge of the extent rec
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5552) * 3) Range is in the middle of the extent rec (no shared edges)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5553) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5554) * For case 1 we remove the extent rec and left rotate to
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5555) * fill the hole.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5556) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5557) * For case 2 we just shrink the existing extent rec, with a
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5558) * tree update if the shrinking edge is also the edge of an
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5559) * extent block.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5560) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5561) * For case 3 we do a right split to turn the extent rec into
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5562) * something case 2 can handle.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5563) */
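/*
 * For example (illustrative numbers): with a record covering clusters
 * [100, 150), removing (100, 50) is case 1 and removing (100, 20) or
 * (130, 20) is case 2.  Removing (110, 10) is case 3: the right split
 * at trunc_range == 120 leaves records [100, 120) and [120, 150), after
 * which (110, 10) shares the right edge of [100, 120) and is handled as
 * case 2 by the second ocfs2_truncate_rec() call.
 */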
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5564) rec = &el->l_recs[index];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5565) rec_range = le32_to_cpu(rec->e_cpos) + ocfs2_rec_clusters(el, rec);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5566) trunc_range = cpos + len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5567)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5568) BUG_ON(cpos < le32_to_cpu(rec->e_cpos) || trunc_range > rec_range);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5569)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5570) trace_ocfs2_remove_extent(
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5571) (unsigned long long)ocfs2_metadata_cache_owner(et->et_ci),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5572) cpos, len, index, le32_to_cpu(rec->e_cpos),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5573) ocfs2_rec_clusters(el, rec));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5574)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5575) if (le32_to_cpu(rec->e_cpos) == cpos || rec_range == trunc_range) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5576) ret = ocfs2_truncate_rec(handle, et, path, index, dealloc,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5577) cpos, len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5578) if (ret) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5579) mlog_errno(ret);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5580) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5581) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5582) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5583) ret = ocfs2_split_tree(handle, et, path, index,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5584) trunc_range, meta_ac);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5585) if (ret) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5586) mlog_errno(ret);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5587) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5588) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5589)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5590) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5591) * The split could have manipulated the tree enough to
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5592) * move the record location, so we have to look for it again.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5593) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5594) ocfs2_reinit_path(path, 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5595)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5596) ret = ocfs2_find_path(et->et_ci, path, cpos);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5597) if (ret) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5598) mlog_errno(ret);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5599) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5600) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5601)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5602) el = path_leaf_el(path);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5603) index = ocfs2_search_extent_list(el, cpos);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5604) if (index == -1) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5605) ocfs2_error(ocfs2_metadata_cache_get_super(et->et_ci),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5606) "Owner %llu: split at cpos %u lost record\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5607) (unsigned long long)ocfs2_metadata_cache_owner(et->et_ci),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5608) cpos);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5609) ret = -EROFS;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5610) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5611) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5612)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5613) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5614) * Double check our values here. If anything is fishy,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5615) * it's easier to catch it at the top level.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5616) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5617) rec = &el->l_recs[index];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5618) rec_range = le32_to_cpu(rec->e_cpos) +
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5619) ocfs2_rec_clusters(el, rec);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5620) if (rec_range != trunc_range) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5621) ocfs2_error(ocfs2_metadata_cache_get_super(et->et_ci),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5622) "Owner %llu: error after split at cpos %u trunc len %u, existing record is (%u,%u)\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5623) (unsigned long long)ocfs2_metadata_cache_owner(et->et_ci),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5624) cpos, len, le32_to_cpu(rec->e_cpos),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5625) ocfs2_rec_clusters(el, rec));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5626) ret = -EROFS;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5627) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5628) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5629)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5630) ret = ocfs2_truncate_rec(handle, et, path, index, dealloc,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5631) cpos, len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5632) if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5633) mlog_errno(ret);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5634) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5635)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5636) out:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5637) ocfs2_free_path(path);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5638) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5639) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5640)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5641) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5642) * ocfs2_reserve_blocks_for_rec_trunc() is basically the same as
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5643) * ocfs2_lock_allocators(), except that it accepts a number of
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5644) * extra blocks to reserve and it only handles metadata
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5645) * allocations.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5646) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5647) * Currently, only ocfs2_remove_btree_range() uses it for truncating
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5648) * and punching holes.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5649) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5650) static int ocfs2_reserve_blocks_for_rec_trunc(struct inode *inode,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5651) struct ocfs2_extent_tree *et,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5652) u32 extents_to_split,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5653) struct ocfs2_alloc_context **ac,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5654) int extra_blocks)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5655) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5656) int ret = 0, num_free_extents;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5657) unsigned int max_recs_needed = 2 * extents_to_split;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5658) struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5659)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5660) *ac = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5661)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5662) num_free_extents = ocfs2_num_free_extents(et);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5663) if (num_free_extents < 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5664) ret = num_free_extents;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5665) mlog_errno(ret);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5666) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5667) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5668)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5669) if (!num_free_extents ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5670) (ocfs2_sparse_alloc(osb) && num_free_extents < max_recs_needed))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5671) extra_blocks += ocfs2_extend_meta_needed(et->et_root_el);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5672)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5673) if (extra_blocks) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5674) ret = ocfs2_reserve_new_metadata_blocks(osb, extra_blocks, ac);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5675) if (ret < 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5676) if (ret != -ENOSPC)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5677) mlog_errno(ret);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5678) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5679) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5680)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5681) out:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5682) if (ret) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5683) if (*ac) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5684) ocfs2_free_alloc_context(*ac);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5685) *ac = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5686) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5687) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5688)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5689) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5690) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5691)
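/*
 * Remove the clusters backing [cpos, cpos + len) from the extent tree
 * rooted at @et.  Refcounted extents have their refcounts decreased
 * (taking the refcount tree lock unless the caller already holds it);
 * otherwise the freed clusters are appended to this node's truncate
 * log.  Quota for the removed clusters is released and the update is
 * journaled.
 */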
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5692) int ocfs2_remove_btree_range(struct inode *inode,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5693) struct ocfs2_extent_tree *et,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5694) u32 cpos, u32 phys_cpos, u32 len, int flags,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5695) struct ocfs2_cached_dealloc_ctxt *dealloc,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5696) u64 refcount_loc, bool refcount_tree_locked)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5697) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5698) int ret, credits = 0, extra_blocks = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5699) u64 phys_blkno = ocfs2_clusters_to_blocks(inode->i_sb, phys_cpos);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5700) struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5701) struct inode *tl_inode = osb->osb_tl_inode;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5702) handle_t *handle;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5703) struct ocfs2_alloc_context *meta_ac = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5704) struct ocfs2_refcount_tree *ref_tree = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5705)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5706) if ((flags & OCFS2_EXT_REFCOUNTED) && len) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5707) BUG_ON(!ocfs2_is_refcount_inode(inode));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5708)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5709) if (!refcount_tree_locked) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5710) ret = ocfs2_lock_refcount_tree(osb, refcount_loc, 1,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5711) &ref_tree, NULL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5712) if (ret) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5713) mlog_errno(ret);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5714) goto bail;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5715) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5716) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5717)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5718) ret = ocfs2_prepare_refcount_change_for_del(inode,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5719) refcount_loc,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5720) phys_blkno,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5721) len,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5722) &credits,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5723) &extra_blocks);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5724) if (ret < 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5725) mlog_errno(ret);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5726) goto bail;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5727) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5728) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5729)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5730) ret = ocfs2_reserve_blocks_for_rec_trunc(inode, et, 1, &meta_ac,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5731) extra_blocks);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5732) if (ret) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5733) mlog_errno(ret);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5734) goto bail;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5735) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5736)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5737) inode_lock(tl_inode);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5738)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5739) if (ocfs2_truncate_log_needs_flush(osb)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5740) ret = __ocfs2_flush_truncate_log(osb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5741) if (ret < 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5742) mlog_errno(ret);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5743) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5744) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5745) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5746)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5747) handle = ocfs2_start_trans(osb,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5748) ocfs2_remove_extent_credits(osb->sb) + credits);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5749) if (IS_ERR(handle)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5750) ret = PTR_ERR(handle);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5751) mlog_errno(ret);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5752) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5753) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5754)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5755) ret = ocfs2_et_root_journal_access(handle, et,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5756) OCFS2_JOURNAL_ACCESS_WRITE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5757) if (ret) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5758) mlog_errno(ret);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5759) goto out_commit;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5760) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5761)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5762) dquot_free_space_nodirty(inode,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5763) ocfs2_clusters_to_bytes(inode->i_sb, len));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5764)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5765) ret = ocfs2_remove_extent(handle, et, cpos, len, meta_ac, dealloc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5766) if (ret) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5767) mlog_errno(ret);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5768) goto out_commit;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5769) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5770)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5771) ocfs2_et_update_clusters(et, -len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5772) ocfs2_update_inode_fsync_trans(handle, inode, 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5773)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5774) ocfs2_journal_dirty(handle, et->et_root_bh);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5775)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5776) if (phys_blkno) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5777) if (flags & OCFS2_EXT_REFCOUNTED)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5778) ret = ocfs2_decrease_refcount(inode, handle,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5779) ocfs2_blocks_to_clusters(osb->sb,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5780) phys_blkno),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5781) len, meta_ac,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5782) dealloc, 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5783) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5784) ret = ocfs2_truncate_log_append(osb, handle,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5785) phys_blkno, len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5786) if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5787) mlog_errno(ret);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5788)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5789) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5790)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5791) out_commit:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5792) ocfs2_commit_trans(osb, handle);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5793) out:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5794) inode_unlock(tl_inode);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5795) bail:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5796) if (meta_ac)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5797) ocfs2_free_alloc_context(meta_ac);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5798)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5799) if (ref_tree)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5800) ocfs2_unlock_refcount_tree(osb, ref_tree, 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5801)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5802) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5803) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5804)
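/*
 * Returns nonzero when the truncate log has no free record slots left
 * (tl_used == tl_count), i.e. it must be flushed before another record
 * can be appended.
 */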
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5805) int ocfs2_truncate_log_needs_flush(struct ocfs2_super *osb)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5806) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5807) struct buffer_head *tl_bh = osb->osb_tl_bh;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5808) struct ocfs2_dinode *di;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5809) struct ocfs2_truncate_log *tl;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5810)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5811) di = (struct ocfs2_dinode *) tl_bh->b_data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5812) tl = &di->id2.i_dealloc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5813)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5814) mlog_bug_on_msg(le16_to_cpu(tl->tl_used) > le16_to_cpu(tl->tl_count),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5815) "slot %d, invalid truncate log parameters: used = "
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5816) "%u, count = %u\n", osb->slot_num,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5817) le16_to_cpu(tl->tl_used), le16_to_cpu(tl->tl_count));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5818) return le16_to_cpu(tl->tl_used) == le16_to_cpu(tl->tl_count);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5819) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5820)
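/*
 * A new range can be coalesced with the tail record when it starts
 * exactly where that record ends.  For example (illustrative numbers),
 * a tail record with t_start = 100 and t_clusters = 8 covers clusters
 * [100, 108), so a new range starting at cluster 108 can be merged
 * into it.
 */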
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5821) static int ocfs2_truncate_log_can_coalesce(struct ocfs2_truncate_log *tl,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5822) unsigned int new_start)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5823) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5824) unsigned int tail_index;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5825) unsigned int current_tail;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5826)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5827) /* No records, nothing to coalesce */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5828) if (!le16_to_cpu(tl->tl_used))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5829) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5830)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5831) tail_index = le16_to_cpu(tl->tl_used) - 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5832) current_tail = le32_to_cpu(tl->tl_recs[tail_index].t_start);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5833) current_tail += le32_to_cpu(tl->tl_recs[tail_index].t_clusters);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5834)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5835) return current_tail == new_start;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5836) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5837)
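/*
 * Record @num_clusters clusters starting at @start_blk in this node's
 * truncate log.  The caller must hold the tl_inode lock, have started
 * @handle with enough credits, and have flushed the log beforehand if
 * it was full.  Ranges contiguous with the tail record are merged into
 * it instead of consuming a new slot.
 */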
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5838) int ocfs2_truncate_log_append(struct ocfs2_super *osb,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5839) handle_t *handle,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5840) u64 start_blk,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5841) unsigned int num_clusters)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5842) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5843) int status, index;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5844) unsigned int start_cluster, tl_count;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5845) struct inode *tl_inode = osb->osb_tl_inode;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5846) struct buffer_head *tl_bh = osb->osb_tl_bh;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5847) struct ocfs2_dinode *di;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5848) struct ocfs2_truncate_log *tl;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5849)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5850) BUG_ON(inode_trylock(tl_inode));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5851)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5852) start_cluster = ocfs2_blocks_to_clusters(osb->sb, start_blk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5853)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5854) di = (struct ocfs2_dinode *) tl_bh->b_data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5855)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5856) /* tl_bh is loaded from ocfs2_truncate_log_init(). It's validated
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5857) * by the underlying call to ocfs2_read_inode_block(), so any
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5858) * corruption is a code bug */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5859) BUG_ON(!OCFS2_IS_VALID_DINODE(di));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5860)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5861) tl = &di->id2.i_dealloc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5862) tl_count = le16_to_cpu(tl->tl_count);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5863) mlog_bug_on_msg(tl_count > ocfs2_truncate_recs_per_inode(osb->sb) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5864) tl_count == 0,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5865) "Truncate record count on #%llu invalid: "
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5866) "wanted %u, actual %u\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5867) (unsigned long long)OCFS2_I(tl_inode)->ip_blkno,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5868) ocfs2_truncate_recs_per_inode(osb->sb),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5869) le16_to_cpu(tl->tl_count));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5870)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5871) /* Caller should have known to flush before calling us. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5872) index = le16_to_cpu(tl->tl_used);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5873) if (index >= tl_count) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5874) status = -ENOSPC;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5875) mlog_errno(status);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5876) goto bail;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5877) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5878)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5879) status = ocfs2_journal_access_di(handle, INODE_CACHE(tl_inode), tl_bh,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5880) OCFS2_JOURNAL_ACCESS_WRITE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5881) if (status < 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5882) mlog_errno(status);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5883) goto bail;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5884) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5885)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5886) trace_ocfs2_truncate_log_append(
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5887) (unsigned long long)OCFS2_I(tl_inode)->ip_blkno, index,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5888) start_cluster, num_clusters);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5889) if (ocfs2_truncate_log_can_coalesce(tl, start_cluster)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5890) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5891) * Move index back to the record we are coalescing with.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5892) * ocfs2_truncate_log_can_coalesce() guarantees that tl_used is nonzero.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5893) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5894) index--;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5895)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5896) num_clusters += le32_to_cpu(tl->tl_recs[index].t_clusters);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5897) trace_ocfs2_truncate_log_append(
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5898) (unsigned long long)OCFS2_I(tl_inode)->ip_blkno,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5899) index, le32_to_cpu(tl->tl_recs[index].t_start),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5900) num_clusters);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5901) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5902) tl->tl_recs[index].t_start = cpu_to_le32(start_cluster);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5903) tl->tl_used = cpu_to_le16(index + 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5904) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5905) tl->tl_recs[index].t_clusters = cpu_to_le32(num_clusters);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5906)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5907) ocfs2_journal_dirty(handle, tl_bh);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5908)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5909) osb->truncated_clusters += num_clusters;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5910) bail:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5911) return status;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5912) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5913)
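/*
 * Walk the truncate log from its tail and, for each record, start a
 * small transaction, shrink tl_used, and free the recorded clusters
 * back to the global bitmap via @data_alloc_inode.  Records with a
 * zero start block are skipped as invalid.
 */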
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5914) static int ocfs2_replay_truncate_records(struct ocfs2_super *osb,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5915) struct inode *data_alloc_inode,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5916) struct buffer_head *data_alloc_bh)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5917) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5918) int status = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5919) int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5920) unsigned int num_clusters;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5921) u64 start_blk;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5922) struct ocfs2_truncate_rec rec;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5923) struct ocfs2_dinode *di;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5924) struct ocfs2_truncate_log *tl;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5925) struct inode *tl_inode = osb->osb_tl_inode;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5926) struct buffer_head *tl_bh = osb->osb_tl_bh;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5927) handle_t *handle;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5928)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5929) di = (struct ocfs2_dinode *) tl_bh->b_data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5930) tl = &di->id2.i_dealloc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5931) i = le16_to_cpu(tl->tl_used) - 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5932) while (i >= 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5933) handle = ocfs2_start_trans(osb, OCFS2_TRUNCATE_LOG_FLUSH_ONE_REC);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5934) if (IS_ERR(handle)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5935) status = PTR_ERR(handle);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5936) mlog_errno(status);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5937) goto bail;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5938) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5939)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5940) /* Caller has given us at least enough credits to
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5941) * update the truncate log dinode */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5942) status = ocfs2_journal_access_di(handle, INODE_CACHE(tl_inode), tl_bh,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5943) OCFS2_JOURNAL_ACCESS_WRITE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5944) if (status < 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5945) mlog_errno(status);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5946) goto bail;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5947) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5948)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5949) tl->tl_used = cpu_to_le16(i);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5950)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5951) ocfs2_journal_dirty(handle, tl_bh);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5952)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5953) rec = tl->tl_recs[i];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5954) start_blk = ocfs2_clusters_to_blocks(data_alloc_inode->i_sb,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5955) le32_to_cpu(rec.t_start));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5956) num_clusters = le32_to_cpu(rec.t_clusters);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5957)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5958) /* if start_blk is not set, we ignore the record as
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5959) * invalid. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5960) if (start_blk) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5961) trace_ocfs2_replay_truncate_records(
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5962) (unsigned long long)OCFS2_I(tl_inode)->ip_blkno,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5963) i, le32_to_cpu(rec.t_start), num_clusters);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5964)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5965) status = ocfs2_free_clusters(handle, data_alloc_inode,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5966) data_alloc_bh, start_blk,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5967) num_clusters);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5968) if (status < 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5969) mlog_errno(status);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5970) goto bail;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5971) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5972) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5973)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5974) ocfs2_commit_trans(osb, handle);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5975) i--;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5976) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5977)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5978) osb->truncated_clusters = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5979)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5980) bail:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5981) return status;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5982) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5983)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5984) /* Expects the caller to already hold the tl_inode inode lock (i_rwsem). */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5985) int __ocfs2_flush_truncate_log(struct ocfs2_super *osb)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5986) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5987) int status;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5988) unsigned int num_to_flush;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5989) struct inode *tl_inode = osb->osb_tl_inode;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5990) struct inode *data_alloc_inode = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5991) struct buffer_head *tl_bh = osb->osb_tl_bh;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5992) struct buffer_head *data_alloc_bh = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5993) struct ocfs2_dinode *di;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5994) struct ocfs2_truncate_log *tl;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5995) struct ocfs2_journal *journal = osb->journal;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5996)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5997) BUG_ON(inode_trylock(tl_inode));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5998)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5999) di = (struct ocfs2_dinode *) tl_bh->b_data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6000)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6001) /* tl_bh is loaded from ocfs2_truncate_log_init(). It's validated
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6002) * by the underlying call to ocfs2_read_inode_block(), so any
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6003) * corruption is a code bug */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6004) BUG_ON(!OCFS2_IS_VALID_DINODE(di));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6005)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6006) tl = &di->id2.i_dealloc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6007) num_to_flush = le16_to_cpu(tl->tl_used);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6008) trace_ocfs2_flush_truncate_log(
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6009) (unsigned long long)OCFS2_I(tl_inode)->ip_blkno,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6010) num_to_flush);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6011) if (!num_to_flush) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6012) status = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6013) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6014) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6015)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6016) /* Appending to the truncate log (TA) and flushing the truncate log
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6017) * (TF) are two separate transactions. Both may be committed but not
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6018) * yet checkpointed. If a crash occurs at that point, both will be
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6019) * replayed even though some clusters were already released to the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6020) * global bitmap; replaying the truncate log then frees them twice.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6021) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6022) jbd2_journal_lock_updates(journal->j_journal);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6023) status = jbd2_journal_flush(journal->j_journal);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6024) jbd2_journal_unlock_updates(journal->j_journal);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6025) if (status < 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6026) mlog_errno(status);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6027) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6028) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6029)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6030) data_alloc_inode = ocfs2_get_system_file_inode(osb,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6031) GLOBAL_BITMAP_SYSTEM_INODE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6032) OCFS2_INVALID_SLOT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6033) if (!data_alloc_inode) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6034) status = -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6035) mlog(ML_ERROR, "Could not get bitmap inode!\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6036) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6037) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6038)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6039) inode_lock(data_alloc_inode);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6040)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6041) status = ocfs2_inode_lock(data_alloc_inode, &data_alloc_bh, 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6042) if (status < 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6043) mlog_errno(status);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6044) goto out_mutex;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6045) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6046)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6047) status = ocfs2_replay_truncate_records(osb, data_alloc_inode,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6048) data_alloc_bh);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6049) if (status < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6050) mlog_errno(status);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6051)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6052) brelse(data_alloc_bh);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6053) ocfs2_inode_unlock(data_alloc_inode, 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6054)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6055) out_mutex:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6056) inode_unlock(data_alloc_inode);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6057) iput(data_alloc_inode);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6058)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6059) out:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6060) return status;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6061) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6062)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6063) int ocfs2_flush_truncate_log(struct ocfs2_super *osb)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6064) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6065) int status;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6066) struct inode *tl_inode = osb->osb_tl_inode;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6067)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6068) inode_lock(tl_inode);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6069) status = __ocfs2_flush_truncate_log(osb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6070) inode_unlock(tl_inode);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6071)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6072) return status;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6073) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6074)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6075) static void ocfs2_truncate_log_worker(struct work_struct *work)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6076) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6077) int status;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6078) struct ocfs2_super *osb =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6079) container_of(work, struct ocfs2_super,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6080) osb_truncate_log_wq.work);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6081)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6082) status = ocfs2_flush_truncate_log(osb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6083) if (status < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6084) mlog_errno(status);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6085) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6086) ocfs2_init_steal_slots(osb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6087) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6088)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6089) #define OCFS2_TRUNCATE_LOG_FLUSH_INTERVAL (2 * HZ)
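/*
 * (Re)arm the delayed truncate log flush.  With @cancel set, any
 * already-queued flush is cancelled first so the timer starts over.
 */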
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6090) void ocfs2_schedule_truncate_log_flush(struct ocfs2_super *osb,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6091) int cancel)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6092) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6093) if (osb->osb_tl_inode &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6094) atomic_read(&osb->osb_tl_disable) == 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6095) /* We want to push off log flushes while truncates are
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6096) * still running. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6097) if (cancel)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6098) cancel_delayed_work(&osb->osb_truncate_log_wq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6099)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6100) queue_delayed_work(osb->ocfs2_wq, &osb->osb_truncate_log_wq,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6101) OCFS2_TRUNCATE_LOG_FLUSH_INTERVAL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6102) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6103) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6104)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6105) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6106) * Try to flush the truncate log if it holds enough clusters to satisfy
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6107) * the request. Returns < 0 on error, 0 if not enough space is held, and
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6108) * 1 if enough space was freed for the caller to retry the allocation.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6109) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6110) int ocfs2_try_to_free_truncate_log(struct ocfs2_super *osb,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6111) unsigned int needed)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6112) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6113) tid_t target;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6114) int ret = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6115) unsigned int truncated_clusters;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6116)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6117) inode_lock(osb->osb_tl_inode);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6118) truncated_clusters = osb->truncated_clusters;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6119) inode_unlock(osb->osb_tl_inode);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6120)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6121) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6122) * Check whether we can succeed in allocating if we free
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6123) * the truncate log.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6124) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6125) if (truncated_clusters < needed)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6126) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6127)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6128) ret = ocfs2_flush_truncate_log(osb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6129) if (ret) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6130) mlog_errno(ret);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6131) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6132) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6133)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6134) if (jbd2_journal_start_commit(osb->journal->j_journal, &target)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6135) jbd2_log_wait_commit(osb->journal->j_journal, target);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6136) ret = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6137) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6138) out:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6139) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6140) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6141)
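/*
 * Look up the TRUNCATE_LOG_SYSTEM_INODE for @slot_num and read its
 * inode block.  On success the caller owns a reference on *tl_inode
 * and *tl_bh.
 */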
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6142) static int ocfs2_get_truncate_log_info(struct ocfs2_super *osb,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6143) int slot_num,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6144) struct inode **tl_inode,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6145) struct buffer_head **tl_bh)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6146) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6147) int status;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6148) struct inode *inode = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6149) struct buffer_head *bh = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6150)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6151) inode = ocfs2_get_system_file_inode(osb,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6152) TRUNCATE_LOG_SYSTEM_INODE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6153) slot_num);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6154) if (!inode) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6155) status = -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6156) mlog(ML_ERROR, "Could not load truncate log inode!\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6157) goto bail;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6158) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6159)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6160) status = ocfs2_read_inode_block(inode, &bh);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6161) if (status < 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6162) iput(inode);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6163) mlog_errno(status);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6164) goto bail;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6165) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6166)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6167) *tl_inode = inode;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6168) *tl_bh = bh;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6169) bail:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6170) return status;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6171) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6172)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6173) /* Called during the first stage of node recovery. We stamp a clean
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6174) * truncate log and pass back a copy for processing later. If the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6175) * truncate log does not require processing, *tl_copy is set to
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6176) * NULL. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6177) int ocfs2_begin_truncate_log_recovery(struct ocfs2_super *osb,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6178) int slot_num,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6179) struct ocfs2_dinode **tl_copy)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6180) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6181) int status;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6182) struct inode *tl_inode = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6183) struct buffer_head *tl_bh = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6184) struct ocfs2_dinode *di;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6185) struct ocfs2_truncate_log *tl;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6186)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6187) *tl_copy = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6188)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6189) trace_ocfs2_begin_truncate_log_recovery(slot_num);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6190)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6191) status = ocfs2_get_truncate_log_info(osb, slot_num, &tl_inode, &tl_bh);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6192) if (status < 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6193) mlog_errno(status);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6194) goto bail;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6195) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6196)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6197) di = (struct ocfs2_dinode *) tl_bh->b_data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6198)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6199) /* tl_bh is loaded from ocfs2_get_truncate_log_info(). It's
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6200) * validated by the underlying call to ocfs2_read_inode_block(),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6201) * so any corruption is a code bug */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6202) BUG_ON(!OCFS2_IS_VALID_DINODE(di));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6203)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6204) tl = &di->id2.i_dealloc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6205) if (le16_to_cpu(tl->tl_used)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6206) trace_ocfs2_truncate_log_recovery_num(le16_to_cpu(tl->tl_used));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6207)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6208) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6209) * Assuming the write-out below goes well, this copy will be
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6210) * passed back to recovery for processing.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6211) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6212) *tl_copy = kmemdup(tl_bh->b_data, tl_bh->b_size, GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6213) if (!(*tl_copy)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6214) status = -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6215) mlog_errno(status);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6216) goto bail;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6217) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6218)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6219) /* All we need to do to clear the truncate log is set
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6220) * tl_used to zero. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6221) tl->tl_used = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6222)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6223) ocfs2_compute_meta_ecc(osb->sb, tl_bh->b_data, &di->i_check);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6224) status = ocfs2_write_block(osb, tl_bh, INODE_CACHE(tl_inode));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6225) if (status < 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6226) mlog_errno(status);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6227) goto bail;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6228) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6229) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6230)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6231) bail:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6232) iput(tl_inode);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6233) brelse(tl_bh);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6234)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6235) if (status < 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6236) kfree(*tl_copy);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6237) *tl_copy = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6238) mlog_errno(status);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6239) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6240)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6241) return status;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6242) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6243)
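/*
 * Second stage of truncate log recovery: replay each record from the
 * recovered node's copy (@tl_copy) into the local truncate log,
 * flushing the local log whenever it fills up.
 */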
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6244) int ocfs2_complete_truncate_log_recovery(struct ocfs2_super *osb,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6245) struct ocfs2_dinode *tl_copy)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6246) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6247) int status = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6248) int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6249) unsigned int clusters, num_recs, start_cluster;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6250) u64 start_blk;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6251) handle_t *handle;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6252) struct inode *tl_inode = osb->osb_tl_inode;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6253) struct ocfs2_truncate_log *tl;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6254)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6255) if (OCFS2_I(tl_inode)->ip_blkno == le64_to_cpu(tl_copy->i_blkno)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6256) mlog(ML_ERROR, "Asked to recover my own truncate log!\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6257) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6258) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6259)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6260) tl = &tl_copy->id2.i_dealloc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6261) num_recs = le16_to_cpu(tl->tl_used);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6262) trace_ocfs2_complete_truncate_log_recovery(
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6263) (unsigned long long)le64_to_cpu(tl_copy->i_blkno),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6264) num_recs);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6265)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6266) inode_lock(tl_inode);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6267) for (i = 0; i < num_recs; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6268) if (ocfs2_truncate_log_needs_flush(osb)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6269) status = __ocfs2_flush_truncate_log(osb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6270) if (status < 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6271) mlog_errno(status);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6272) goto bail_up;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6273) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6274) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6275)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6276) handle = ocfs2_start_trans(osb, OCFS2_TRUNCATE_LOG_UPDATE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6277) if (IS_ERR(handle)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6278) status = PTR_ERR(handle);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6279) mlog_errno(status);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6280) goto bail_up;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6281) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6282)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6283) clusters = le32_to_cpu(tl->tl_recs[i].t_clusters);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6284) start_cluster = le32_to_cpu(tl->tl_recs[i].t_start);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6285) start_blk = ocfs2_clusters_to_blocks(osb->sb, start_cluster);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6286)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6287) status = ocfs2_truncate_log_append(osb, handle,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6288) start_blk, clusters);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6289) ocfs2_commit_trans(osb, handle);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6290) if (status < 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6291) mlog_errno(status);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6292) goto bail_up;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6293) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6294) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6295)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6296) bail_up:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6297) inode_unlock(tl_inode);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6298)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6299) return status;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6300) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6301)
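/*
 * Disable further scheduling of truncate log flushes, cancel any
 * pending flush work and wait for running work, do a final flush, and
 * drop the buffer and inode references taken at init time.
 */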
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6302) void ocfs2_truncate_log_shutdown(struct ocfs2_super *osb)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6303) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6304) int status;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6305) struct inode *tl_inode = osb->osb_tl_inode;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6306)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6307) atomic_set(&osb->osb_tl_disable, 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6308)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6309) if (tl_inode) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6310) cancel_delayed_work(&osb->osb_truncate_log_wq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6311) flush_workqueue(osb->ocfs2_wq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6312)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6313) status = ocfs2_flush_truncate_log(osb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6314) if (status < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6315) mlog_errno(status);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6316)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6317) brelse(osb->osb_tl_bh);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6318) iput(osb->osb_tl_inode);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6319) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6320) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6321)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6322) int ocfs2_truncate_log_init(struct ocfs2_super *osb)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6323) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6324) int status;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6325) struct inode *tl_inode = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6326) struct buffer_head *tl_bh = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6327)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6328) status = ocfs2_get_truncate_log_info(osb,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6329) osb->slot_num,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6330) &tl_inode,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6331) &tl_bh);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6332) if (status < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6333) mlog_errno(status);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6334)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6335) /* ocfs2_truncate_log_shutdown keys on the existence of
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6336) * osb->osb_tl_inode so we don't set any of the osb variables
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6337) * until we're sure all is well. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6338) INIT_DELAYED_WORK(&osb->osb_truncate_log_wq,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6339) ocfs2_truncate_log_worker);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6340) atomic_set(&osb->osb_tl_disable, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6341) osb->osb_tl_bh = tl_bh;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6342) osb->osb_tl_inode = tl_inode;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6343)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6344) return status;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6345) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6346)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6347) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6348) * Delayed de-allocation of suballocator blocks.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6349) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6350) * Some sets of block de-allocations might involve multiple suballocator inodes.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6351) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6352) * The locking for this can get extremely complicated, especially when
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6353) * the suballocator inodes to delete from aren't known until deep
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6354) * within an unrelated codepath.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6355) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6356) * ocfs2_extent_block structures are a good example of this - an inode
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6357) * btree could have been grown by any number of nodes each allocating
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6358) * out of their own suballoc inode.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6359) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6360) * These structures allow the delay of block de-allocation until a
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6361) * later time, when locking of multiple cluster inodes won't cause
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6362) * deadlock.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6363) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6364)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6365) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6366) * Describes a single bit freed from a suballocator. For the block
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6367) * suballocators, it represents one block. For the global cluster
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6368) * allocator, it represents a range of clusters, and free_bit holds
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6369) * the number of clusters.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6370) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6371) struct ocfs2_cached_block_free {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6372) struct ocfs2_cached_block_free *free_next;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6373) u64 free_bg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6374) u64 free_blk;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6375) unsigned int free_bit;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6376) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6377)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6378) struct ocfs2_per_slot_free_list {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6379) struct ocfs2_per_slot_free_list *f_next_suballocator;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6380) int f_inode_type;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6381) int f_slot;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6382) struct ocfs2_cached_block_free *f_first;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6383) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6384)
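/*
 * Free each entry on @head against the suballocator inode identified
 * by (@sysfile_type, @slot), one small transaction per bit, then
 * release the list entries.  On early exit the remaining entries are
 * still freed, without deallocating their bits.
 */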
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6385) static int ocfs2_free_cached_blocks(struct ocfs2_super *osb,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6386) int sysfile_type,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6387) int slot,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6388) struct ocfs2_cached_block_free *head)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6389) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6390) int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6391) u64 bg_blkno;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6392) handle_t *handle;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6393) struct inode *inode;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6394) struct buffer_head *di_bh = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6395) struct ocfs2_cached_block_free *tmp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6396)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6397) inode = ocfs2_get_system_file_inode(osb, sysfile_type, slot);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6398) if (!inode) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6399) ret = -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6400) mlog_errno(ret);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6401) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6402) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6403)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6404) inode_lock(inode);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6405)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6406) ret = ocfs2_inode_lock(inode, &di_bh, 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6407) if (ret) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6408) mlog_errno(ret);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6409) goto out_mutex;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6410) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6411)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6412) while (head) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6413) if (head->free_bg)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6414) bg_blkno = head->free_bg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6415) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6416) bg_blkno = ocfs2_which_suballoc_group(head->free_blk,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6417) head->free_bit);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6418) handle = ocfs2_start_trans(osb, OCFS2_SUBALLOC_FREE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6419) if (IS_ERR(handle)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6420) ret = PTR_ERR(handle);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6421) mlog_errno(ret);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6422) goto out_unlock;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6423) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6424)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6425) trace_ocfs2_free_cached_blocks(
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6426) (unsigned long long)head->free_blk, head->free_bit);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6427)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6428) ret = ocfs2_free_suballoc_bits(handle, inode, di_bh,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6429) head->free_bit, bg_blkno, 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6430) if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6431) mlog_errno(ret);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6432)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6433) ocfs2_commit_trans(osb, handle);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6434)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6435) tmp = head;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6436) head = head->free_next;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6437) kfree(tmp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6438) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6439)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6440) out_unlock:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6441) ocfs2_inode_unlock(inode, 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6442) brelse(di_bh);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6443) out_mutex:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6444) inode_unlock(inode);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6445) iput(inode);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6446) out:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6447) while (head) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6448) /* Premature exit may have left some dangling items. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6449) tmp = head;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6450) head = head->free_next;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6451) kfree(tmp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6452) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6453)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6454) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6455) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6456)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6457) int ocfs2_cache_cluster_dealloc(struct ocfs2_cached_dealloc_ctxt *ctxt,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6458) u64 blkno, unsigned int bit)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6459) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6460) int ret = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6461) struct ocfs2_cached_block_free *item;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6462)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6463) item = kzalloc(sizeof(*item), GFP_NOFS);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6464) if (item == NULL) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6465) ret = -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6466) mlog_errno(ret);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6467) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6468) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6469)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6470) trace_ocfs2_cache_cluster_dealloc((unsigned long long)blkno, bit);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6471)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6472) item->free_blk = blkno;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6473) item->free_bit = bit;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6474) item->free_next = ctxt->c_global_allocator;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6475)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6476) ctxt->c_global_allocator = item;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6477) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6478) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6479)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6480) static int ocfs2_free_cached_clusters(struct ocfs2_super *osb,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6481) struct ocfs2_cached_block_free *head)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6482) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6483) struct ocfs2_cached_block_free *tmp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6484) struct inode *tl_inode = osb->osb_tl_inode;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6485) handle_t *handle;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6486) int ret = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6487)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6488) inode_lock(tl_inode);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6489)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6490) while (head) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6491) if (ocfs2_truncate_log_needs_flush(osb)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6492) ret = __ocfs2_flush_truncate_log(osb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6493) if (ret < 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6494) mlog_errno(ret);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6495) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6496) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6497) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6498)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6499) handle = ocfs2_start_trans(osb, OCFS2_TRUNCATE_LOG_UPDATE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6500) if (IS_ERR(handle)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6501) ret = PTR_ERR(handle);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6502) mlog_errno(ret);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6503) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6504) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6505)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6506) ret = ocfs2_truncate_log_append(osb, handle, head->free_blk,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6507) head->free_bit);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6508)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6509) ocfs2_commit_trans(osb, handle);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6510) tmp = head;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6511) head = head->free_next;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6512) kfree(tmp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6513)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6514) if (ret < 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6515) mlog_errno(ret);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6516) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6517) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6518) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6519)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6520) inode_unlock(tl_inode);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6521)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6522) while (head) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6523) /* Premature exit may have left some dangling items. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6524) tmp = head;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6525) head = head->free_next;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6526) kfree(tmp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6527) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6528)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6529) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6530) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6531)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6532) int ocfs2_run_deallocs(struct ocfs2_super *osb,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6533) struct ocfs2_cached_dealloc_ctxt *ctxt)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6534) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6535) int ret = 0, ret2;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6536) struct ocfs2_per_slot_free_list *fl;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6537)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6538) if (!ctxt)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6539) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6540)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6541) while (ctxt->c_first_suballocator) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6542) fl = ctxt->c_first_suballocator;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6543)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6544) if (fl->f_first) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6545) trace_ocfs2_run_deallocs(fl->f_inode_type,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6546) fl->f_slot);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6547) ret2 = ocfs2_free_cached_blocks(osb,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6548) fl->f_inode_type,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6549) fl->f_slot,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6550) fl->f_first);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6551) if (ret2)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6552) mlog_errno(ret2);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6553) if (!ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6554) ret = ret2;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6555) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6556)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6557) ctxt->c_first_suballocator = fl->f_next_suballocator;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6558) kfree(fl);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6559) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6560)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6561) if (ctxt->c_global_allocator) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6562) ret2 = ocfs2_free_cached_clusters(osb,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6563) ctxt->c_global_allocator);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6564) if (ret2)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6565) mlog_errno(ret2);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6566) if (!ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6567) ret = ret2;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6568)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6569) ctxt->c_global_allocator = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6570) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6571)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6572) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6573) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6574)
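/*
 * Editor's illustrative sketch, not part of the original file:
 * clusters recorded with ocfs2_cache_cluster_dealloc() are only
 * appended to the truncate log by ocfs2_run_deallocs() (the block
 * frees happen immediately), so a caller that wants the space back in
 * the global bitmap promptly may also kick the truncate log worker.
 * Whether and when to do that is a policy decision of the real call
 * sites; this wrapper is hypothetical.
 */
static void __maybe_unused
ocfs2_run_deallocs_and_flush_sketch(struct ocfs2_super *osb,
				    struct ocfs2_cached_dealloc_ctxt *ctxt)
{
	int ret;

	ret = ocfs2_run_deallocs(osb, ctxt);
	if (ret)
		mlog_errno(ret);

	/* Schedule the truncate log flush worker to run soon. */
	ocfs2_schedule_truncate_log_flush(osb, 1);
}
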
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6575) static struct ocfs2_per_slot_free_list *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6576) ocfs2_find_per_slot_free_list(int type,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6577) int slot,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6578) struct ocfs2_cached_dealloc_ctxt *ctxt)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6579) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6580) struct ocfs2_per_slot_free_list *fl = ctxt->c_first_suballocator;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6581)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6582) while (fl) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6583) if (fl->f_inode_type == type && fl->f_slot == slot)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6584) return fl;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6585)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6586) fl = fl->f_next_suballocator;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6587) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6588)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6589) fl = kmalloc(sizeof(*fl), GFP_NOFS);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6590) if (fl) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6591) fl->f_inode_type = type;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6592) fl->f_slot = slot;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6593) fl->f_first = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6594) fl->f_next_suballocator = ctxt->c_first_suballocator;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6595)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6596) ctxt->c_first_suballocator = fl;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6597) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6598) return fl;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6599) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6600)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6601) static struct ocfs2_per_slot_free_list *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6602) ocfs2_find_preferred_free_list(int type,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6603) int preferred_slot,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6604) int *real_slot,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6605) struct ocfs2_cached_dealloc_ctxt *ctxt)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6606) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6607) struct ocfs2_per_slot_free_list *fl = ctxt->c_first_suballocator;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6608)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6609) while (fl) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6610) if (fl->f_inode_type == type && fl->f_slot == preferred_slot) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6611) *real_slot = fl->f_slot;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6612) return fl;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6613) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6614)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6615) fl = fl->f_next_suballocator;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6616) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6617)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6618) /* If we can't find any free list matching the preferred slot, just
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6619) * use the first one.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6620) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6621) fl = ctxt->c_first_suballocator;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6622) *real_slot = fl->f_slot;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6623)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6624) return fl;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6625) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6626)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6627) /* A return value of 1 indicates the dealloc context is empty. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6628) static int ocfs2_is_dealloc_empty(struct ocfs2_extent_tree *et)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6629) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6630) struct ocfs2_per_slot_free_list *fl = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6631)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6632) if (!et->et_dealloc)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6633) return 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6634)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6635) fl = et->et_dealloc->c_first_suballocator;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6636) if (!fl)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6637) return 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6638)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6639) if (!fl->f_first)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6640) return 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6641)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6642) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6643) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6644)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6645) /* If extent blocks were deleted from the tree due to extent rotation and
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6646) * merging, and no metadata was reserved ahead of time, try to reuse those
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6647) * just-deleted blocks. This is only used to reuse extent blocks.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6648) * It is supposed to find enough extent blocks in the dealloc context if
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6649) * our estimate of the metadata requirement is accurate.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6650) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6651) static int ocfs2_reuse_blk_from_dealloc(handle_t *handle,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6652) struct ocfs2_extent_tree *et,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6653) struct buffer_head **new_eb_bh,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6654) int blk_wanted, int *blk_given)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6655) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6656) int i, status = 0, real_slot;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6657) struct ocfs2_cached_dealloc_ctxt *dealloc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6658) struct ocfs2_per_slot_free_list *fl;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6659) struct ocfs2_cached_block_free *bf;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6660) struct ocfs2_extent_block *eb;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6661) struct ocfs2_super *osb =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6662) OCFS2_SB(ocfs2_metadata_cache_get_super(et->et_ci));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6663)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6664) *blk_given = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6665)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6666) /* If the extent tree doesn't have a dealloc context, that is not an
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6667) * error. Just tell the caller that dealloc can't provide any blocks
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6668) * and that it should claim more space from the allocator.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6669) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6670) dealloc = et->et_dealloc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6671) if (!dealloc)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6672) goto bail;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6673)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6674) for (i = 0; i < blk_wanted; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6675) /* Prefer to use local slot */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6676) fl = ocfs2_find_preferred_free_list(EXTENT_ALLOC_SYSTEM_INODE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6677) osb->slot_num, &real_slot,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6678) dealloc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6679) /* If no more blocks can be reused, the caller should claim
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6680) * more from the allocator. Just return normally.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6681) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6682) if (!fl) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6683) status = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6684) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6685) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6686)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6687) bf = fl->f_first;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6688) fl->f_first = bf->free_next;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6689)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6690) new_eb_bh[i] = sb_getblk(osb->sb, bf->free_blk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6691) if (new_eb_bh[i] == NULL) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6692) status = -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6693) mlog_errno(status);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6694) goto bail;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6695) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6696)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6697) mlog(0, "Reusing block(%llu) from "
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6698) "dealloc(local slot:%d, real slot:%d)\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6699) bf->free_blk, osb->slot_num, real_slot);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6700)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6701) ocfs2_set_new_buffer_uptodate(et->et_ci, new_eb_bh[i]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6702)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6703) status = ocfs2_journal_access_eb(handle, et->et_ci,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6704) new_eb_bh[i],
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6705) OCFS2_JOURNAL_ACCESS_CREATE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6706) if (status < 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6707) mlog_errno(status);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6708) goto bail;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6709) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6710)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6711) memset(new_eb_bh[i]->b_data, 0, osb->sb->s_blocksize);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6712) eb = (struct ocfs2_extent_block *) new_eb_bh[i]->b_data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6713)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6714) /* We can't guarantee that the buffer head is still cached, so
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6715) * populate the extent block again.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6716) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6717) strcpy(eb->h_signature, OCFS2_EXTENT_BLOCK_SIGNATURE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6718) eb->h_blkno = cpu_to_le64(bf->free_blk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6719) eb->h_fs_generation = cpu_to_le32(osb->fs_generation);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6720) eb->h_suballoc_slot = cpu_to_le16(real_slot);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6721) eb->h_suballoc_loc = cpu_to_le64(bf->free_bg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6722) eb->h_suballoc_bit = cpu_to_le16(bf->free_bit);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6723) eb->h_list.l_count =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6724) cpu_to_le16(ocfs2_extent_recs_per_eb(osb->sb));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6725)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6726) /* We'll also be dirtied by the caller, so
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6727) * this isn't absolutely necessary.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6728) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6729) ocfs2_journal_dirty(handle, new_eb_bh[i]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6730)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6731) if (!fl->f_first) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6732) dealloc->c_first_suballocator = fl->f_next_suballocator;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6733) kfree(fl);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6734) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6735) kfree(bf);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6736) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6737)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6738) *blk_given = i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6739)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6740) bail:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6741) if (unlikely(status < 0)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6742) for (i = 0; i < blk_wanted; i++)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6743) brelse(new_eb_bh[i]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6744) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6745)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6746) return status;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6747) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6748)
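/*
 * Editor's illustrative sketch, not part of the original file: how a
 * caller needing 'wanted' extent blocks could combine the reuse path
 * above with the regular metadata suballocator.  The wrapper and the
 * way the two sources are mixed here are hypothetical; the real
 * callers in the tree-growing code typically pick one source or the
 * other.
 */
static int __maybe_unused
ocfs2_get_extent_blocks_sketch(handle_t *handle,
			       struct ocfs2_extent_tree *et,
			       struct ocfs2_alloc_context *meta_ac,
			       struct buffer_head **bhs, int wanted)
{
	int ret, given = 0;

	if (!ocfs2_is_dealloc_empty(et)) {
		/* Recycle extent blocks that were just deleted. */
		ret = ocfs2_reuse_blk_from_dealloc(handle, et, bhs,
						   wanted, &given);
		if (ret) {
			mlog_errno(ret);
			return ret;
		}
	}

	if (given == wanted)
		return 0;

	/* Claim whatever is still missing from the suballocator. */
	return ocfs2_create_new_meta_bhs(handle, et, wanted - given,
					 meta_ac, bhs + given);
}
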
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6749) int ocfs2_cache_block_dealloc(struct ocfs2_cached_dealloc_ctxt *ctxt,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6750) int type, int slot, u64 suballoc,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6751) u64 blkno, unsigned int bit)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6752) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6753) int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6754) struct ocfs2_per_slot_free_list *fl;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6755) struct ocfs2_cached_block_free *item;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6756)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6757) fl = ocfs2_find_per_slot_free_list(type, slot, ctxt);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6758) if (fl == NULL) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6759) ret = -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6760) mlog_errno(ret);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6761) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6762) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6763)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6764) item = kzalloc(sizeof(*item), GFP_NOFS);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6765) if (item == NULL) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6766) ret = -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6767) mlog_errno(ret);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6768) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6769) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6770)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6771) trace_ocfs2_cache_block_dealloc(type, slot,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6772) (unsigned long long)suballoc,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6773) (unsigned long long)blkno, bit);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6774)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6775) item->free_bg = suballoc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6776) item->free_blk = blkno;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6777) item->free_bit = bit;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6778) item->free_next = fl->f_first;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6779)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6780) fl->f_first = item;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6781)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6782) ret = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6783) out:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6784) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6785) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6786)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6787) static int ocfs2_cache_extent_block_free(struct ocfs2_cached_dealloc_ctxt *ctxt,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6788) struct ocfs2_extent_block *eb)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6789) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6790) return ocfs2_cache_block_dealloc(ctxt, EXTENT_ALLOC_SYSTEM_INODE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6791) le16_to_cpu(eb->h_suballoc_slot),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6792) le64_to_cpu(eb->h_suballoc_loc),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6793) le64_to_cpu(eb->h_blkno),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6794) le16_to_cpu(eb->h_suballoc_bit));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6795) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6796)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6797) static int ocfs2_zero_func(handle_t *handle, struct buffer_head *bh)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6798) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6799) set_buffer_uptodate(bh);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6800) mark_buffer_dirty(bh);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6801) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6802) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6803)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6804) void ocfs2_map_and_dirty_page(struct inode *inode, handle_t *handle,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6805) unsigned int from, unsigned int to,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6806) struct page *page, int zero, u64 *phys)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6807) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6808) int ret, partial = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6809) loff_t start_byte = ((loff_t)page->index << PAGE_SHIFT) + from;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6810) loff_t length = to - from;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6811)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6812) ret = ocfs2_map_page_blocks(page, phys, inode, from, to, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6813) if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6814) mlog_errno(ret);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6815)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6816) if (zero)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6817) zero_user_segment(page, from, to);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6818)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6819) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6820) * Need to mark the buffers we zeroed as uptodate here
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6821) * if they aren't already - ocfs2_map_page_blocks()
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6822) * might have skipped some of them.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6823) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6824) ret = walk_page_buffers(handle, page_buffers(page),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6825) from, to, &partial,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6826) ocfs2_zero_func);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6827) if (ret < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6828) mlog_errno(ret);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6829) else if (ocfs2_should_order_data(inode)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6830) ret = ocfs2_jbd2_inode_add_write(handle, inode,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6831) start_byte, length);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6832) if (ret < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6833) mlog_errno(ret);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6834) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6835)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6836) if (!partial)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6837) SetPageUptodate(page);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6838)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6839) flush_dcache_page(page);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6840) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6841)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6842) static void ocfs2_zero_cluster_pages(struct inode *inode, loff_t start,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6843) loff_t end, struct page **pages,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6844) int numpages, u64 phys, handle_t *handle)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6845) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6846) int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6847) struct page *page;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6848) unsigned int from, to = PAGE_SIZE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6849) struct super_block *sb = inode->i_sb;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6850)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6851) BUG_ON(!ocfs2_sparse_alloc(OCFS2_SB(sb)));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6852)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6853) if (numpages == 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6854) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6855)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6856) to = PAGE_SIZE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6857) for (i = 0; i < numpages; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6858) page = pages[i];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6859)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6860) from = start & (PAGE_SIZE - 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6861) if ((end >> PAGE_SHIFT) == page->index)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6862) to = end & (PAGE_SIZE - 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6863)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6864) BUG_ON(from > PAGE_SIZE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6865) BUG_ON(to > PAGE_SIZE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6866)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6867) ocfs2_map_and_dirty_page(inode, handle, from, to, page, 1,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6868) &phys);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6869)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6870) start = (page->index + 1) << PAGE_SHIFT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6871) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6872) out:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6873) if (pages)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6874) ocfs2_unlock_and_free_pages(pages, numpages);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6875) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6876)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6877) int ocfs2_grab_pages(struct inode *inode, loff_t start, loff_t end,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6878) struct page **pages, int *num)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6879) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6880) int numpages, ret = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6881) struct address_space *mapping = inode->i_mapping;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6882) unsigned long index;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6883) loff_t last_page_bytes;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6884)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6885) BUG_ON(start > end);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6886)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6887) numpages = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6888) last_page_bytes = PAGE_ALIGN(end);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6889) index = start >> PAGE_SHIFT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6890) do {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6891) pages[numpages] = find_or_create_page(mapping, index, GFP_NOFS);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6892) if (!pages[numpages]) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6893) ret = -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6894) mlog_errno(ret);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6895) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6896) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6897)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6898) numpages++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6899) index++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6900) } while (index < (last_page_bytes >> PAGE_SHIFT));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6901)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6902) out:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6903) if (ret != 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6904) if (pages)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6905) ocfs2_unlock_and_free_pages(pages, numpages);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6906) numpages = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6907) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6908)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6909) *num = numpages;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6910)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6911) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6912) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6913)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6914) static int ocfs2_grab_eof_pages(struct inode *inode, loff_t start, loff_t end,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6915) struct page **pages, int *num)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6916) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6917) struct super_block *sb = inode->i_sb;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6918)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6919) BUG_ON(start >> OCFS2_SB(sb)->s_clustersize_bits !=
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6920) (end - 1) >> OCFS2_SB(sb)->s_clustersize_bits);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6921)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6922) return ocfs2_grab_pages(inode, start, end, pages, num);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6923) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6924)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6925) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6926) * Zero the area past i_size but still within an allocated
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6927) * cluster. This avoids exposing nonzero data on subsequent file
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6928) * extends.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6929) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6930) * We need to call this before i_size is updated on the inode because
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6931) * otherwise block_write_full_page() will skip writeout of pages past
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6932) * i_size.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6933) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6934) int ocfs2_zero_range_for_truncate(struct inode *inode, handle_t *handle,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6935) u64 range_start, u64 range_end)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6936) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6937) int ret = 0, numpages;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6938) struct page **pages = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6939) u64 phys;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6940) unsigned int ext_flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6941) struct super_block *sb = inode->i_sb;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6942)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6943) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6944) * File systems which don't support sparse files zero on every
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6945) * extend.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6946) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6947) if (!ocfs2_sparse_alloc(OCFS2_SB(sb)))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6948) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6949)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6950) pages = kcalloc(ocfs2_pages_per_cluster(sb),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6951) sizeof(struct page *), GFP_NOFS);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6952) if (pages == NULL) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6953) ret = -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6954) mlog_errno(ret);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6955) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6956) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6957)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6958) if (range_start == range_end)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6959) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6960)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6961) ret = ocfs2_extent_map_get_blocks(inode,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6962) range_start >> sb->s_blocksize_bits,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6963) &phys, NULL, &ext_flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6964) if (ret) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6965) mlog_errno(ret);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6966) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6967) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6968)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6969) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6970) * The tail is a hole, or is marked unwritten. In either case, we
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6971) * can count on read and write to return/push zeros.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6972) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6973) if (phys == 0 || ext_flags & OCFS2_EXT_UNWRITTEN)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6974) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6975)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6976) ret = ocfs2_grab_eof_pages(inode, range_start, range_end, pages,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6977) &numpages);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6978) if (ret) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6979) mlog_errno(ret);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6980) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6981) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6982)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6983) ocfs2_zero_cluster_pages(inode, range_start, range_end, pages,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6984) numpages, phys, handle);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6985)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6986) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6987) * Initiate writeout of the pages we zero'd here. We don't
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6988) * wait on them - the truncate_inode_pages() call later will
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6989) * do that for us.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6990) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6991) ret = filemap_fdatawrite_range(inode->i_mapping, range_start,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6992) range_end - 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6993) if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6994) mlog_errno(ret);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6995)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6996) out:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6997) kfree(pages);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6998)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6999) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7000) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7001)
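/*
 * Editor's illustrative sketch, not part of the original file: a
 * caller in the truncate path would derive the zero range from the new
 * i_size and the end of the cluster containing it, then call the
 * helper above from inside its transaction.  The wrapper name is
 * hypothetical.
 */
static int __maybe_unused
ocfs2_zero_new_tail_sketch(struct inode *inode, handle_t *handle,
			   u64 new_i_size)
{
	struct super_block *sb = inode->i_sb;
	u64 range_start = new_i_size;
	u64 range_end = ocfs2_clusters_to_bytes(sb,
				ocfs2_clusters_for_bytes(sb, new_i_size));

	/* i_size already sits on a cluster boundary - nothing to zero. */
	if (range_start == range_end)
		return 0;

	return ocfs2_zero_range_for_truncate(inode, handle, range_start,
					     range_end);
}
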
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7002) static void ocfs2_zero_dinode_id2_with_xattr(struct inode *inode,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7003) struct ocfs2_dinode *di)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7004) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7005) unsigned int blocksize = 1 << inode->i_sb->s_blocksize_bits;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7006) unsigned int xattrsize = le16_to_cpu(di->i_xattr_inline_size);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7007)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7008) if (le16_to_cpu(di->i_dyn_features) & OCFS2_INLINE_XATTR_FL)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7009) memset(&di->id2, 0, blocksize -
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7010) offsetof(struct ocfs2_dinode, id2) -
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7011) xattrsize);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7012) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7013) memset(&di->id2, 0, blocksize -
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7014) offsetof(struct ocfs2_dinode, id2));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7015) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7016)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7017) void ocfs2_dinode_new_extent_list(struct inode *inode,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7018) struct ocfs2_dinode *di)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7019) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7020) ocfs2_zero_dinode_id2_with_xattr(inode, di);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7021) di->id2.i_list.l_tree_depth = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7022) di->id2.i_list.l_next_free_rec = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7023) di->id2.i_list.l_count = cpu_to_le16(
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7024) ocfs2_extent_recs_per_inode_with_xattr(inode->i_sb, di));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7025) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7026)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7027) void ocfs2_set_inode_data_inline(struct inode *inode, struct ocfs2_dinode *di)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7028) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7029) struct ocfs2_inode_info *oi = OCFS2_I(inode);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7030) struct ocfs2_inline_data *idata = &di->id2.i_data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7031)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7032) spin_lock(&oi->ip_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7033) oi->ip_dyn_features |= OCFS2_INLINE_DATA_FL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7034) di->i_dyn_features = cpu_to_le16(oi->ip_dyn_features);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7035) spin_unlock(&oi->ip_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7036)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7037) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7038) * We clear the entire i_data structure here so that all
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7039) * fields can be properly initialized.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7040) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7041) ocfs2_zero_dinode_id2_with_xattr(inode, di);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7042)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7043) idata->id_count = cpu_to_le16(
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7044) ocfs2_max_inline_data_with_xattr(inode->i_sb, di));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7045) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7046)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7047) int ocfs2_convert_inline_data_to_extents(struct inode *inode,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7048) struct buffer_head *di_bh)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7049) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7050) int ret, has_data, num_pages = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7051) int need_free = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7052) u32 bit_off, num;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7053) handle_t *handle;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7054) u64 block;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7055) struct ocfs2_inode_info *oi = OCFS2_I(inode);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7056) struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7057) struct ocfs2_dinode *di = (struct ocfs2_dinode *)di_bh->b_data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7058) struct ocfs2_alloc_context *data_ac = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7059) struct page *page = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7060) struct ocfs2_extent_tree et;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7061) int did_quota = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7062)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7063) has_data = i_size_read(inode) ? 1 : 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7064)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7065) if (has_data) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7066) ret = ocfs2_reserve_clusters(osb, 1, &data_ac);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7067) if (ret) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7068) mlog_errno(ret);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7069) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7070) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7071) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7072)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7073) handle = ocfs2_start_trans(osb,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7074) ocfs2_inline_to_extents_credits(osb->sb));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7075) if (IS_ERR(handle)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7076) ret = PTR_ERR(handle);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7077) mlog_errno(ret);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7078) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7079) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7080)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7081) ret = ocfs2_journal_access_di(handle, INODE_CACHE(inode), di_bh,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7082) OCFS2_JOURNAL_ACCESS_WRITE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7083) if (ret) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7084) mlog_errno(ret);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7085) goto out_commit;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7086) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7087)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7088) if (has_data) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7089) unsigned int page_end = min_t(unsigned, PAGE_SIZE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7090) osb->s_clustersize);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7091) u64 phys;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7092)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7093) ret = dquot_alloc_space_nodirty(inode,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7094) ocfs2_clusters_to_bytes(osb->sb, 1));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7095) if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7096) goto out_commit;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7097) did_quota = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7098)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7099) data_ac->ac_resv = &oi->ip_la_data_resv;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7100)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7101) ret = ocfs2_claim_clusters(handle, data_ac, 1, &bit_off,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7102) &num);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7103) if (ret) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7104) mlog_errno(ret);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7105) goto out_commit;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7106) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7107)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7108) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7109) * Save two copies, one for insert, and one that can
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7110) * be changed by ocfs2_map_and_dirty_page() below.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7111) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7112) block = phys = ocfs2_clusters_to_blocks(inode->i_sb, bit_off);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7113)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7114) ret = ocfs2_grab_eof_pages(inode, 0, page_end, &page,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7115) &num_pages);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7116) if (ret) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7117) mlog_errno(ret);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7118) need_free = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7119) goto out_commit;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7120) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7121)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7122) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7123) * This should populate the 1st page for us and mark
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7124) * it up to date.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7125) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7126) ret = ocfs2_read_inline_data(inode, page, di_bh);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7127) if (ret) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7128) mlog_errno(ret);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7129) need_free = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7130) goto out_unlock;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7131) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7132)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7133) ocfs2_map_and_dirty_page(inode, handle, 0, page_end, page, 0,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7134) &phys);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7135) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7136)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7137) spin_lock(&oi->ip_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7138) oi->ip_dyn_features &= ~OCFS2_INLINE_DATA_FL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7139) di->i_dyn_features = cpu_to_le16(oi->ip_dyn_features);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7140) spin_unlock(&oi->ip_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7141)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7142) ocfs2_update_inode_fsync_trans(handle, inode, 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7143) ocfs2_dinode_new_extent_list(inode, di);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7144)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7145) ocfs2_journal_dirty(handle, di_bh);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7146)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7147) if (has_data) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7148) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7149) * An error at this point should be extremely rare. If
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7150) * this proves to be false, we could always re-build
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7151) * the in-inode data from our pages.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7152) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7153) ocfs2_init_dinode_extent_tree(&et, INODE_CACHE(inode), di_bh);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7154) ret = ocfs2_insert_extent(handle, &et, 0, block, 1, 0, NULL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7155) if (ret) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7156) mlog_errno(ret);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7157) need_free = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7158) goto out_unlock;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7159) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7160)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7161) inode->i_blocks = ocfs2_inode_sector_count(inode);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7162) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7163)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7164) out_unlock:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7165) if (page)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7166) ocfs2_unlock_and_free_pages(&page, num_pages);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7167)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7168) out_commit:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7169) if (ret < 0 && did_quota)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7170) dquot_free_space_nodirty(inode,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7171) ocfs2_clusters_to_bytes(osb->sb, 1));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7172)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7173) if (need_free) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7174) if (data_ac->ac_which == OCFS2_AC_USE_LOCAL)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7175) ocfs2_free_local_alloc_bits(osb, handle, data_ac,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7176) bit_off, num);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7177) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7178) ocfs2_free_clusters(handle,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7179) data_ac->ac_inode,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7180) data_ac->ac_bh,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7181) ocfs2_clusters_to_blocks(osb->sb, bit_off),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7182) num);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7183) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7184)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7185) ocfs2_commit_trans(osb, handle);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7186)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7187) out:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7188) if (data_ac)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7189) ocfs2_free_alloc_context(data_ac);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7190) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7191) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7192)
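/*
 * Editor's illustrative sketch, not part of the original file: the
 * kind of check a write path might make before falling back to the
 * conversion above.  The wrapper is hypothetical and omits the cluster
 * locking and journaling that a real caller needs around it.
 */
static int __maybe_unused
ocfs2_maybe_convert_inline_sketch(struct inode *inode,
				  struct buffer_head *di_bh,
				  loff_t new_size)
{
	struct ocfs2_dinode *di = (struct ocfs2_dinode *)di_bh->b_data;

	if (!(OCFS2_I(inode)->ip_dyn_features & OCFS2_INLINE_DATA_FL))
		return 0;	/* already extent mapped */

	/* Still fits in the inode block?  Leave the data inline. */
	if (new_size <= ocfs2_max_inline_data_with_xattr(inode->i_sb, di))
		return 0;

	return ocfs2_convert_inline_data_to_extents(inode, di_bh);
}
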
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7193) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7194) * It is expected that, by the time you call this function,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7195) * inode->i_size and fe->i_size have been adjusted.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7198) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7199) int ocfs2_commit_truncate(struct ocfs2_super *osb,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7200) struct inode *inode,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7201) struct buffer_head *di_bh)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7202) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7203) int status = 0, i, flags = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7204) u32 new_highest_cpos, range, trunc_cpos, trunc_len, phys_cpos, coff;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7205) u64 blkno = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7206) struct ocfs2_extent_list *el;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7207) struct ocfs2_extent_rec *rec;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7208) struct ocfs2_path *path = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7209) struct ocfs2_dinode *di = (struct ocfs2_dinode *)di_bh->b_data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7210) struct ocfs2_extent_list *root_el = &(di->id2.i_list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7211) u64 refcount_loc = le64_to_cpu(di->i_refcount_loc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7212) struct ocfs2_extent_tree et;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7213) struct ocfs2_cached_dealloc_ctxt dealloc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7214) struct ocfs2_refcount_tree *ref_tree = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7215)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7216) ocfs2_init_dinode_extent_tree(&et, INODE_CACHE(inode), di_bh);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7217) ocfs2_init_dealloc_ctxt(&dealloc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7218)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7219) new_highest_cpos = ocfs2_clusters_for_bytes(osb->sb,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7220) i_size_read(inode));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7221)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7222) path = ocfs2_new_path(di_bh, &di->id2.i_list,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7223) ocfs2_journal_access_di);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7224) if (!path) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7225) status = -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7226) mlog_errno(status);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7227) goto bail;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7228) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7229)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7230) ocfs2_extent_map_trunc(inode, new_highest_cpos);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7231)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7232) start:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7233) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7234) * Check that we still have allocation to delete.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7235) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7236) if (OCFS2_I(inode)->ip_clusters == 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7237) status = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7238) goto bail;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7239) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7240)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7241) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7242) * Truncate always works against the rightmost tree branch.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7243) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7244) status = ocfs2_find_path(INODE_CACHE(inode), path, UINT_MAX);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7245) if (status) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7246) mlog_errno(status);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7247) goto bail;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7248) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7249)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7250) trace_ocfs2_commit_truncate(
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7251) (unsigned long long)OCFS2_I(inode)->ip_blkno,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7252) new_highest_cpos,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7253) OCFS2_I(inode)->ip_clusters,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7254) path->p_tree_depth);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7255)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7256) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7257) * By now, el will point to the extent list on the bottom most
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7258) * portion of this tree. Only the tail record is considered in
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7259) * each pass.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7260) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7261) * We handle the following cases, in order:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7262) * - empty extent: delete the remaining branch
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7263) * - remove the entire record
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7264) * - remove a partial record
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7265) * - no record needs to be removed (truncate has completed)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7266) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7267) el = path_leaf_el(path);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7268) if (le16_to_cpu(el->l_next_free_rec) == 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7269) ocfs2_error(inode->i_sb,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7270) "Inode %llu has empty extent block at %llu\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7271) (unsigned long long)OCFS2_I(inode)->ip_blkno,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7272) (unsigned long long)path_leaf_bh(path)->b_blocknr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7273) status = -EROFS;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7274) goto bail;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7275) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7276)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7277) i = le16_to_cpu(el->l_next_free_rec) - 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7278) rec = &el->l_recs[i];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7279) flags = rec->e_flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7280) range = le32_to_cpu(rec->e_cpos) + ocfs2_rec_clusters(el, rec);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7281)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7282) if (i == 0 && ocfs2_is_empty_extent(rec)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7283) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7284) * Lower levels depend on this never happening, but it's best
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7285) * to check it up here before changing the tree.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7286) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7287) if (root_el->l_tree_depth && rec->e_int_clusters == 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7288) mlog(ML_ERROR, "Inode %lu has an empty "
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7289) "extent record, depth %u\n", inode->i_ino,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7290) le16_to_cpu(root_el->l_tree_depth));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7291) status = ocfs2_remove_rightmost_empty_extent(osb,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7292) &et, path, &dealloc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7293) if (status) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7294) mlog_errno(status);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7295) goto bail;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7296) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7297)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7298) ocfs2_reinit_path(path, 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7299) goto start;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7300) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7301) trunc_cpos = le32_to_cpu(rec->e_cpos);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7302) trunc_len = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7303) blkno = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7304) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7305) } else if (le32_to_cpu(rec->e_cpos) >= new_highest_cpos) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7306) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7307) * Truncate entire record.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7308) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7309) trunc_cpos = le32_to_cpu(rec->e_cpos);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7310) trunc_len = ocfs2_rec_clusters(el, rec);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7311) blkno = le64_to_cpu(rec->e_blkno);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7312) } else if (range > new_highest_cpos) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7313) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7314) * Partial truncate. It should also be
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7315) * the last truncate we're doing.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7316) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7317) trunc_cpos = new_highest_cpos;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7318) trunc_len = range - new_highest_cpos;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7319) coff = new_highest_cpos - le32_to_cpu(rec->e_cpos);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7320) blkno = le64_to_cpu(rec->e_blkno) +
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7321) ocfs2_clusters_to_blocks(inode->i_sb, coff);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7322) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7323) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7324) * Truncate completed, leave happily.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7325) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7326) status = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7327) goto bail;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7328) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7329)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7330) phys_cpos = ocfs2_blocks_to_clusters(inode->i_sb, blkno);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7331)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7332) if ((flags & OCFS2_EXT_REFCOUNTED) && trunc_len && !ref_tree) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7333) status = ocfs2_lock_refcount_tree(osb, refcount_loc, 1,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7334) &ref_tree, NULL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7335) if (status) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7336) mlog_errno(status);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7337) goto bail;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7338) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7339) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7340)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7341) status = ocfs2_remove_btree_range(inode, &et, trunc_cpos,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7342) phys_cpos, trunc_len, flags, &dealloc,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7343) refcount_loc, true);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7344) if (status < 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7345) mlog_errno(status);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7346) goto bail;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7347) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7348)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7349) ocfs2_reinit_path(path, 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7350)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7351) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7352) * The check above will catch the case where we've truncated
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7353) * away all allocation.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7354) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7355) goto start;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7356)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7357) bail:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7358) if (ref_tree)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7359) ocfs2_unlock_refcount_tree(osb, ref_tree, 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7360)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7361) ocfs2_schedule_truncate_log_flush(osb, 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7362)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7363) ocfs2_run_deallocs(osb, &dealloc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7364)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7365) ocfs2_free_path(path);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7366)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7367) return status;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7368) }
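
/*
 * Illustrative sketch (not part of the original code): a hypothetical
 * caller, example_commit_truncate(), honouring the contract documented
 * above ocfs2_commit_truncate(): inode->i_size and the on-disk i_size
 * must already have been adjusted before the tail extents are freed.
 * Real callers also journal the dinode update and hold the proper
 * cluster locks; both are elided here.
 */
static int __maybe_unused example_commit_truncate(struct ocfs2_super *osb,
						  struct inode *inode,
						  struct buffer_head *di_bh,
						  u64 new_size)
{
	struct ocfs2_dinode *di = (struct ocfs2_dinode *)di_bh->b_data;

	/* Shrink the in-memory and on-disk sizes first... */
	i_size_write(inode, new_size);
	di->i_size = cpu_to_le64(new_size);

	/* ...then free everything beyond the new size. */
	return ocfs2_commit_truncate(osb, inode, di_bh);
}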
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7369)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7370) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7371) * 'start' is inclusive, 'end' is not.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7372) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7373) int ocfs2_truncate_inline(struct inode *inode, struct buffer_head *di_bh,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7374) unsigned int start, unsigned int end, int trunc)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7375) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7376) int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7377) unsigned int numbytes;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7378) handle_t *handle;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7379) struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7380) struct ocfs2_dinode *di = (struct ocfs2_dinode *)di_bh->b_data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7381) struct ocfs2_inline_data *idata = &di->id2.i_data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7382)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7383) /* No need to punch hole beyond i_size. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7384) if (start >= i_size_read(inode))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7385) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7386)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7387) if (end > i_size_read(inode))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7388) end = i_size_read(inode);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7389)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7390) BUG_ON(start > end);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7391)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7392) if (!(OCFS2_I(inode)->ip_dyn_features & OCFS2_INLINE_DATA_FL) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7393) !(le16_to_cpu(di->i_dyn_features) & OCFS2_INLINE_DATA_FL) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7394) !ocfs2_supports_inline_data(osb)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7395) ocfs2_error(inode->i_sb,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7396) "Inline data flags for inode %llu don't agree! Disk: 0x%x, Memory: 0x%x, Superblock: 0x%x\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7397) (unsigned long long)OCFS2_I(inode)->ip_blkno,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7398) le16_to_cpu(di->i_dyn_features),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7399) OCFS2_I(inode)->ip_dyn_features,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7400) osb->s_feature_incompat);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7401) ret = -EROFS;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7402) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7403) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7404)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7405) handle = ocfs2_start_trans(osb, OCFS2_INODE_UPDATE_CREDITS);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7406) if (IS_ERR(handle)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7407) ret = PTR_ERR(handle);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7408) mlog_errno(ret);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7409) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7410) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7411)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7412) ret = ocfs2_journal_access_di(handle, INODE_CACHE(inode), di_bh,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7413) OCFS2_JOURNAL_ACCESS_WRITE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7414) if (ret) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7415) mlog_errno(ret);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7416) goto out_commit;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7417) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7418)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7419) numbytes = end - start;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7420) memset(idata->id_data + start, 0, numbytes);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7421)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7422) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7423) * No need to worry about the data page here - it's been
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7424) * truncated already and inline data doesn't need it for
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7425) * pushing zeros to disk, so we'll let readpage pick it up
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7426) * later.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7427) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7428) if (trunc) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7429) i_size_write(inode, start);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7430) di->i_size = cpu_to_le64(start);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7431) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7432)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7433) inode->i_blocks = ocfs2_inode_sector_count(inode);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7434) inode->i_ctime = inode->i_mtime = current_time(inode);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7435)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7436) di->i_ctime = di->i_mtime = cpu_to_le64(inode->i_ctime.tv_sec);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7437) di->i_ctime_nsec = di->i_mtime_nsec = cpu_to_le32(inode->i_ctime.tv_nsec);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7438)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7439) ocfs2_update_inode_fsync_trans(handle, inode, 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7440) ocfs2_journal_dirty(handle, di_bh);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7441)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7442) out_commit:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7443) ocfs2_commit_trans(osb, handle);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7444)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7445) out:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7446) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7447) }
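
/*
 * Illustrative sketch (not part of the original code): a hypothetical
 * helper showing the [start, end) convention of ocfs2_truncate_inline().
 * It zeroes 'count' bytes of inline data starting at byte 'offset'
 * without shrinking i_size (trunc == 0); byte 'offset + count' itself is
 * left untouched.
 */
static int __maybe_unused example_zero_inline_range(struct inode *inode,
						    struct buffer_head *di_bh,
						    unsigned int offset,
						    unsigned int count)
{
	return ocfs2_truncate_inline(inode, di_bh, offset, offset + count, 0);
}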
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7448)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7449) static int ocfs2_trim_extent(struct super_block *sb,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7450) struct ocfs2_group_desc *gd,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7451) u64 group, u32 start, u32 count)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7452) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7453) u64 discard, bcount;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7454) struct ocfs2_super *osb = OCFS2_SB(sb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7455)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7456) bcount = ocfs2_clusters_to_blocks(sb, count);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7457) discard = ocfs2_clusters_to_blocks(sb, start);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7458)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7459) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7460) * For the first cluster group, the gd->bg_blkno is not at the start
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7461) * of the group, but at an offset from the start. If we add it while
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7462) * calculating discard for the first group, we will wrongly start fstrim
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7463) * a few blocks after the desired start block and the range can cross
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7464) * over into the next cluster group. So, add it only if this is not
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7465) * the first cluster group.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7466) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7467) if (group != osb->first_cluster_group_blkno)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7468) discard += le64_to_cpu(gd->bg_blkno);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7469)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7470) trace_ocfs2_trim_extent(sb, (unsigned long long)discard, bcount);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7471)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7472) return sb_issue_discard(sb, discard, bcount, GFP_NOFS, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7473) }
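
/*
 * Worked example for the bg_blkno adjustment above (hypothetical
 * geometry, not from the original code): assume 8 blocks per cluster.
 * Trimming start == 2, count == 3 clusters gives discard == 16 and
 * bcount == 24 blocks.  In the first cluster group, bit N already maps
 * to cluster N of the volume, so block 16 is the correct absolute start
 * and bg_blkno must not be added.  In any later group, bg_blkno is the
 * group's true starting block, so it is added to turn the
 * group-relative offset of 16 blocks into an absolute block number.
 */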
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7474)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7475) static int ocfs2_trim_group(struct super_block *sb,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7476) struct ocfs2_group_desc *gd, u64 group,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7477) u32 start, u32 max, u32 minbits)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7478) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7479) int ret = 0, count = 0, next;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7480) void *bitmap = gd->bg_bitmap;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7481)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7482) if (le16_to_cpu(gd->bg_free_bits_count) < minbits)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7483) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7484)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7485) trace_ocfs2_trim_group((unsigned long long)le64_to_cpu(gd->bg_blkno),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7486) start, max, minbits);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7487)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7488) while (start < max) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7489) start = ocfs2_find_next_zero_bit(bitmap, max, start);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7490) if (start >= max)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7491) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7492) next = ocfs2_find_next_bit(bitmap, max, start);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7493)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7494) if ((next - start) >= minbits) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7495) ret = ocfs2_trim_extent(sb, gd, group,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7496) start, next - start);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7497) if (ret < 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7498) mlog_errno(ret);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7499) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7500) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7501) count += next - start;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7502) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7503) start = next + 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7504)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7505) if (fatal_signal_pending(current)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7506) count = -ERESTARTSYS;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7507) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7508) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7509)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7510) if ((le16_to_cpu(gd->bg_free_bits_count) - count) < minbits)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7511) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7512) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7513)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7514) if (ret < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7515) count = ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7516)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7517) return count;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7518) }
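
/*
 * Illustrative sketch (not part of the original code): the generic
 * "walk runs of clear bits" pattern used by ocfs2_trim_group() above,
 * shown as a hypothetical helper using the standard kernel bitmap
 * helpers on an ordinary bitmap.  Only runs of at least 'minbits' bits
 * are counted, mirroring the minlen handling in the trim loop.
 */
static unsigned long __maybe_unused
example_count_free_runs(const unsigned long *bitmap, unsigned long max,
			unsigned long minbits)
{
	unsigned long start = 0, next, total = 0;

	while (start < max) {
		start = find_next_zero_bit(bitmap, max, start);
		if (start >= max)
			break;
		next = find_next_bit(bitmap, max, start);
		if (next - start >= minbits)
			total += next - start;
		start = next + 1;
	}

	return total;
}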
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7519)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7520) static
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7521) int ocfs2_trim_mainbm(struct super_block *sb, struct fstrim_range *range)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7522) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7523) struct ocfs2_super *osb = OCFS2_SB(sb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7524) u64 start, len, trimmed = 0, first_group, last_group = 0, group = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7525) int ret, cnt;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7526) u32 first_bit, last_bit, minlen;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7527) struct buffer_head *main_bm_bh = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7528) struct inode *main_bm_inode = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7529) struct buffer_head *gd_bh = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7530) struct ocfs2_dinode *main_bm;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7531) struct ocfs2_group_desc *gd = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7532)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7533) start = range->start >> osb->s_clustersize_bits;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7534) len = range->len >> osb->s_clustersize_bits;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7535) minlen = range->minlen >> osb->s_clustersize_bits;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7536)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7537) if (minlen >= osb->bitmap_cpg || range->len < sb->s_blocksize)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7538) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7539)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7540) trace_ocfs2_trim_mainbm(start, len, minlen);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7541)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7542) next_group:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7543) main_bm_inode = ocfs2_get_system_file_inode(osb,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7544) GLOBAL_BITMAP_SYSTEM_INODE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7545) OCFS2_INVALID_SLOT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7546) if (!main_bm_inode) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7547) ret = -EIO;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7548) mlog_errno(ret);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7549) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7550) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7551)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7552) inode_lock(main_bm_inode);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7553)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7554) ret = ocfs2_inode_lock(main_bm_inode, &main_bm_bh, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7555) if (ret < 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7556) mlog_errno(ret);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7557) goto out_mutex;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7558) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7559) main_bm = (struct ocfs2_dinode *)main_bm_bh->b_data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7560)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7561) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7562) * Do some checks before trimming the first group.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7563) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7564) if (!group) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7565) if (start >= le32_to_cpu(main_bm->i_clusters)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7566) ret = -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7567) goto out_unlock;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7568) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7569)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7570) if (start + len > le32_to_cpu(main_bm->i_clusters))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7571) len = le32_to_cpu(main_bm->i_clusters) - start;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7572)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7573) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7574) * Determine first and last group to examine based on
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7575) * start and len
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7576) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7577) first_group = ocfs2_which_cluster_group(main_bm_inode, start);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7578) if (first_group == osb->first_cluster_group_blkno)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7579) first_bit = start;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7580) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7581) first_bit = start - ocfs2_blocks_to_clusters(sb,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7582) first_group);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7583) last_group = ocfs2_which_cluster_group(main_bm_inode,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7584) start + len - 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7585) group = first_group;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7586) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7587)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7588) do {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7589) if (first_bit + len >= osb->bitmap_cpg)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7590) last_bit = osb->bitmap_cpg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7591) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7592) last_bit = first_bit + len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7593)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7594) ret = ocfs2_read_group_descriptor(main_bm_inode,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7595) main_bm, group,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7596) &gd_bh);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7597) if (ret < 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7598) mlog_errno(ret);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7599) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7600) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7601)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7602) gd = (struct ocfs2_group_desc *)gd_bh->b_data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7603) cnt = ocfs2_trim_group(sb, gd, group,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7604) first_bit, last_bit, minlen);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7605) brelse(gd_bh);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7606) gd_bh = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7607) if (cnt < 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7608) ret = cnt;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7609) mlog_errno(ret);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7610) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7611) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7612)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7613) trimmed += cnt;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7614) len -= osb->bitmap_cpg - first_bit;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7615) first_bit = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7616) if (group == osb->first_cluster_group_blkno)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7617) group = ocfs2_clusters_to_blocks(sb, osb->bitmap_cpg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7618) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7619) group += ocfs2_clusters_to_blocks(sb, osb->bitmap_cpg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7620) } while (0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7621)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7622) out_unlock:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7623) ocfs2_inode_unlock(main_bm_inode, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7624) brelse(main_bm_bh);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7625) main_bm_bh = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7626) out_mutex:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7627) inode_unlock(main_bm_inode);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7628) iput(main_bm_inode);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7629)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7630) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7631) * Not all groups may have been trimmed yet, but we release the main_bm
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7632) * related locks between groups to avoid starving other I/O. If nothing
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7633) * has failed and groups remain, go on to trim the next group.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7634) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7635) if (ret >= 0 && group <= last_group) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7636) cond_resched();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7637) goto next_group;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7638) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7639) out:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7640) range->len = trimmed * sb->s_blocksize;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7641) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7642) }
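
/*
 * Worked example for the group walk above (hypothetical numbers, not
 * from the original code): with bitmap_cpg == 32 clusters per group, a
 * request starting at cluster 10 of the first group with len == 50 is
 * handled in two passes: bits [10, 32) of the first group, after which
 * len becomes 50 - (32 - 10) == 28 and first_bit is reset to 0, then
 * bits [0, 28) of the second group.  Between passes the main bitmap
 * locks are dropped and re-taken via next_group: so that other I/O is
 * not starved for the duration of the whole fstrim.
 */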
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7643)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7644) int ocfs2_trim_fs(struct super_block *sb, struct fstrim_range *range)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7645) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7646) int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7647) struct ocfs2_super *osb = OCFS2_SB(sb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7648) struct ocfs2_trim_fs_info info, *pinfo = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7649)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7650) ocfs2_trim_fs_lock_res_init(osb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7651)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7652) trace_ocfs2_trim_fs(range->start, range->len, range->minlen);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7653)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7654) ret = ocfs2_trim_fs_lock(osb, NULL, 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7655) if (ret < 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7656) if (ret != -EAGAIN) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7657) mlog_errno(ret);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7658) ocfs2_trim_fs_lock_res_uninit(osb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7659) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7660) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7661)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7662) mlog(ML_NOTICE, "Waiting for the trim running from another "
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7663) "node on device (%s) to finish.\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7664) osb->dev_str);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7665) ret = ocfs2_trim_fs_lock(osb, &info, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7666) if (ret < 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7667) mlog_errno(ret);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7668) ocfs2_trim_fs_lock_res_uninit(osb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7669) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7670) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7671)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7672) if (info.tf_valid && info.tf_success &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7673) info.tf_start == range->start &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7674) info.tf_len == range->len &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7675) info.tf_minlen == range->minlen) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7676) /* Avoid sending duplicated trim to a shared device */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7677) mlog(ML_NOTICE, "The same trim on device (%s) was "
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7678) "just done by node (%u); returning.\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7679) osb->dev_str, info.tf_nodenum);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7680) range->len = info.tf_trimlen;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7681) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7682) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7683) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7684)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7685) info.tf_nodenum = osb->node_num;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7686) info.tf_start = range->start;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7687) info.tf_len = range->len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7688) info.tf_minlen = range->minlen;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7689)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7690) ret = ocfs2_trim_mainbm(sb, range);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7691)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7692) info.tf_trimlen = range->len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7693) info.tf_success = (ret < 0 ? 0 : 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7694) pinfo = &info;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7695) out:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7696) ocfs2_trim_fs_unlock(osb, pinfo);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7697) ocfs2_trim_fs_lock_res_uninit(osb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7698) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7699) }
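
/*
 * Illustrative sketch (not part of the original code): how this path is
 * typically reached.  fstrim(8) issues the FITRIM ioctl, which ocfs2's
 * ioctl handler forwards to ocfs2_trim_fs().  A minimal, hypothetical
 * userspace caller looks like this (userspace C, so it is kept in a
 * comment rather than compiled into the module):
 *
 *	#include <fcntl.h>
 *	#include <limits.h>
 *	#include <unistd.h>
 *	#include <sys/ioctl.h>
 *	#include <linux/fs.h>		// struct fstrim_range, FITRIM
 *
 *	int trim_whole_fs(const char *mntpoint)
 *	{
 *		struct fstrim_range range = {
 *			.start  = 0,
 *			.len    = ULLONG_MAX,
 *			.minlen = 0,
 *		};
 *		int ret, fd = open(mntpoint, O_RDONLY);
 *
 *		if (fd < 0)
 *			return -1;
 *		ret = ioctl(fd, FITRIM, &range);
 *		// on success, range.len holds the number of bytes trimmed
 *		close(fd);
 *		return ret;
 *	}
 */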