/*
 * Copyright 2000 by Hans Reiser, licensing governed by reiserfs/README
 */

#include <linux/time.h>
#include <linux/slab.h>
#include <linux/string.h>
#include "reiserfs.h"
#include <linux/buffer_head.h>

/*
 * To make any change in the tree we find a node that contains the item
 * to be changed/deleted, or the position in the node where a new item
 * will be inserted. We call this node S. To do balancing we need to
 * decide what we will shift to the left/right neighbor, or to a new
 * node where the new item will go, etc. To make this analysis simpler
 * we build a virtual node. The virtual node is an array of items that
 * will replace the items of node S. (For instance, if we are going to
 * delete an item, the virtual node does not contain it.) The virtual
 * node keeps information about item sizes and types, mergeability of
 * the first and last items, and sizes of all entries in a directory
 * item. We use this array of items when calculating what we can shift
 * to neighbors and how many nodes we have to have if we do not do any
 * shifting, if we shift to the left/right neighbor, or to both.
 */
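
/*
 * Illustrative sketch (editorial addition, not from the original code):
 * if leaf S holds items [A][B][C] and we are inserting a new item X at
 * position 1, the virtual node describes [A][X][B][C]; for a delete of
 * B it describes [A][C].  All of the shift/split decisions below are
 * made against this in-memory array, never against the on-disk node.
 */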

/*
 * Takes an item number in the virtual node and returns the number
 * that item has in the source buffer.
 */
static inline int old_item_num(int new_num, int affected_item_num, int mode)
{
	if (mode == M_PASTE || mode == M_CUT || new_num < affected_item_num)
		return new_num;

	if (mode == M_INSERT) {

		RFALSE(new_num == 0,
		       "vs-8005: for INSERT mode and item number of inserted item");

		return new_num - 1;
	}

	RFALSE(mode != M_DELETE,
	       "vs-8010: old_item_num: mode must be M_DELETE (mode = '%c')",
	       mode);
	/* delete mode */
	return new_num + 1;
}
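
/*
 * Worked example (editorial addition): with affected_item_num == 2,
 * M_INSERT maps virtual items 0,1 to source items 0,1 and virtual items
 * 3,4,... to source items 2,3,... (new_num - 1); M_DELETE maps virtual
 * items 2,3,... to source items 3,4,... (new_num + 1); for M_PASTE and
 * M_CUT the mapping is the identity.
 */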

static void create_virtual_node(struct tree_balance *tb, int h)
{
	struct item_head *ih;
	struct virtual_node *vn = tb->tb_vn;
	int new_num;
	struct buffer_head *Sh;	/* this comes from tb->S[h] */

	Sh = PATH_H_PBUFFER(tb->tb_path, h);

	/* size of changed node */
	vn->vn_size =
	    MAX_CHILD_SIZE(Sh) - B_FREE_SPACE(Sh) + tb->insert_size[h];

	/* for internal nodes the array of virtual items is not created */
	if (h) {
		vn->vn_nr_item = (vn->vn_size - DC_SIZE) / (DC_SIZE + KEY_SIZE);
		return;
	}
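
	/*
	 * Worked example for the internal-node case above (editorial
	 * addition, assuming the common KEY_SIZE == 16 and DC_SIZE == 8):
	 * an internal node carrying 20 keys and 21 child pointers occupies
	 * 20 * 16 + 21 * 8 == 488 bytes, and (488 - 8) / (8 + 16) == 20
	 * recovers the key count.
	 */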

	/* number of items in virtual node */
	vn->vn_nr_item =
	    B_NR_ITEMS(Sh) + ((vn->vn_mode == M_INSERT) ? 1 : 0) -
	    ((vn->vn_mode == M_DELETE) ? 1 : 0);

	/* first virtual item */
	vn->vn_vi = (struct virtual_item *)(tb->tb_vn + 1);
	memset(vn->vn_vi, 0, vn->vn_nr_item * sizeof(struct virtual_item));
	vn->vn_free_ptr += vn->vn_nr_item * sizeof(struct virtual_item);

	/* first item in the node */
	ih = item_head(Sh, 0);

	/* define the mergeability for 0-th item (if it is not being deleted) */
	if (op_is_left_mergeable(&ih->ih_key, Sh->b_size)
	    && (vn->vn_mode != M_DELETE || vn->vn_affected_item_num))
		vn->vn_vi[0].vi_type |= VI_TYPE_LEFT_MERGEABLE;

	/*
	 * go through all items that remain in the virtual
	 * node (except for the new (inserted) one)
	 */
	for (new_num = 0; new_num < vn->vn_nr_item; new_num++) {
		int j;
		struct virtual_item *vi = vn->vn_vi + new_num;
		int is_affected =
		    ((new_num != vn->vn_affected_item_num) ? 0 : 1);

		if (is_affected && vn->vn_mode == M_INSERT)
			continue;

		/* get item number in source node */
		j = old_item_num(new_num, vn->vn_affected_item_num,
				 vn->vn_mode);

		vi->vi_item_len += ih_item_len(ih + j) + IH_SIZE;
		vi->vi_ih = ih + j;
		vi->vi_item = ih_item_body(Sh, ih + j);
		vi->vi_uarea = vn->vn_free_ptr;

		/*
		 * FIXME: there is no check that item operation did not
		 * consume too much memory
		 */
		vn->vn_free_ptr +=
		    op_create_vi(vn, vi, is_affected, tb->insert_size[0]);
		if (tb->vn_buf + tb->vn_buf_size < vn->vn_free_ptr)
			reiserfs_panic(tb->tb_sb, "vs-8030",
				       "virtual node space consumed");

		if (!is_affected)
			/* this is not being changed */
			continue;

		if (vn->vn_mode == M_PASTE || vn->vn_mode == M_CUT) {
			vn->vn_vi[new_num].vi_item_len += tb->insert_size[0];
			/* pointer to data which is going to be pasted */
			vi->vi_new_data = vn->vn_data;
		}
	}

	/* virtual inserted item is not defined yet */
	if (vn->vn_mode == M_INSERT) {
		struct virtual_item *vi = vn->vn_vi + vn->vn_affected_item_num;

		RFALSE(vn->vn_ins_ih == NULL,
		       "vs-8040: item header of inserted item is not specified");
		vi->vi_item_len = tb->insert_size[0];
		vi->vi_ih = vn->vn_ins_ih;
		vi->vi_item = vn->vn_data;
		vi->vi_uarea = vn->vn_free_ptr;

		op_create_vi(vn, vi, 0 /*not pasted or cut */ ,
			     tb->insert_size[0]);
	}

	/*
	 * To set the right merge flag, we take the right delimiting key
	 * and check whether it is a mergeable item.
	 */
	if (tb->CFR[0]) {
		struct reiserfs_key *key;

		key = internal_key(tb->CFR[0], tb->rkey[0]);
		if (op_is_left_mergeable(key, Sh->b_size)
		    && (vn->vn_mode != M_DELETE
			|| vn->vn_affected_item_num != B_NR_ITEMS(Sh) - 1))
			vn->vn_vi[vn->vn_nr_item - 1].vi_type |=
			    VI_TYPE_RIGHT_MERGEABLE;

#ifdef CONFIG_REISERFS_CHECK
		if (op_is_left_mergeable(key, Sh->b_size) &&
		    !(vn->vn_mode != M_DELETE
		      || vn->vn_affected_item_num != B_NR_ITEMS(Sh) - 1)) {
			/*
			 * we delete last item and it could be merged
			 * with right neighbor's first item
			 */
			if (!
			    (B_NR_ITEMS(Sh) == 1
			     && is_direntry_le_ih(item_head(Sh, 0))
			     && ih_entry_count(item_head(Sh, 0)) == 1)) {
				/*
				 * node contains more than 1 item, or item
				 * is not directory item, or this item
				 * contains more than 1 entry
				 */
				print_block(Sh, 0, -1, -1);
				reiserfs_panic(tb->tb_sb, "vs-8045",
					       "rdkey %k, affected item==%d "
					       "(mode==%c) Must be %c",
					       key, vn->vn_affected_item_num,
					       vn->vn_mode, M_DELETE);
			}
		}
#endif

	}
}

/*
 * Using the virtual node, check how many items can be
 * shifted to the left neighbor.
 */
static void check_left(struct tree_balance *tb, int h, int cur_free)
{
	int i;
	struct virtual_node *vn = tb->tb_vn;
	struct virtual_item *vi;
	int d_size, ih_size;

	RFALSE(cur_free < 0, "vs-8050: cur_free (%d) < 0", cur_free);

	/* internal level */
	if (h > 0) {
		tb->lnum[h] = cur_free / (DC_SIZE + KEY_SIZE);
		return;
	}
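
	/*
	 * Numeric sketch for the internal case above (editorial addition,
	 * assuming DC_SIZE == 8 and KEY_SIZE == 16): 600 free bytes in the
	 * left neighbor allow 600 / 24 == 25 whole (key, child pointer)
	 * pairs to be accepted, so lnum[h] becomes 25.
	 */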

	/* leaf level */

	if (!cur_free || !vn->vn_nr_item) {
		/* no free space or nothing to move */
		tb->lnum[h] = 0;
		tb->lbytes = -1;
		return;
	}

	RFALSE(!PATH_H_PPARENT(tb->tb_path, 0),
	       "vs-8055: parent does not exist or invalid");

	vi = vn->vn_vi;
	if ((unsigned int)cur_free >=
	    (vn->vn_size -
	     ((vi->vi_type & VI_TYPE_LEFT_MERGEABLE) ? IH_SIZE : 0))) {
		/* all the contents of S[0] fit into L[0] */

		RFALSE(vn->vn_mode == M_INSERT || vn->vn_mode == M_PASTE,
		       "vs-8055: invalid mode or balance condition failed");

		tb->lnum[0] = vn->vn_nr_item;
		tb->lbytes = -1;
		return;
	}

	d_size = 0, ih_size = IH_SIZE;

	/* first item may be merged with the last item in the left neighbor */
	if (vi->vi_type & VI_TYPE_LEFT_MERGEABLE)
		d_size = -((int)IH_SIZE), ih_size = 0;

	tb->lnum[0] = 0;
	for (i = 0; i < vn->vn_nr_item;
	     i++, ih_size = IH_SIZE, d_size = 0, vi++) {
		d_size += vi->vi_item_len;
		if (cur_free >= d_size) {
			/* the item can be shifted entirely */
			cur_free -= d_size;
			tb->lnum[0]++;
			continue;
		}

		/* the item cannot be shifted entirely, try to split it */
		/*
		 * check whether L[0] can hold ih and at least one byte
		 * of the item body
		 */

		/* cannot shift even a part of the current item */
		if (cur_free <= ih_size) {
			tb->lbytes = -1;
			return;
		}
		cur_free -= ih_size;

		tb->lbytes = op_check_left(vi, cur_free, 0, 0);
		if (tb->lbytes != -1)
			/* count partially shifted item */
			tb->lnum[0]++;

		break;
	}

	return;
}

/*
 * Using the virtual node, check how many items can be
 * shifted to the right neighbor.
 */
static void check_right(struct tree_balance *tb, int h, int cur_free)
{
	int i;
	struct virtual_node *vn = tb->tb_vn;
	struct virtual_item *vi;
	int d_size, ih_size;

	RFALSE(cur_free < 0, "vs-8070: cur_free < 0");

	/* internal level */
	if (h > 0) {
		tb->rnum[h] = cur_free / (DC_SIZE + KEY_SIZE);
		return;
	}

	/* leaf level */

	if (!cur_free || !vn->vn_nr_item) {
		/* no free space or nothing to move */
		tb->rnum[h] = 0;
		tb->rbytes = -1;
		return;
	}

	RFALSE(!PATH_H_PPARENT(tb->tb_path, 0),
	       "vs-8075: parent does not exist or invalid");

	vi = vn->vn_vi + vn->vn_nr_item - 1;
	if ((unsigned int)cur_free >=
	    (vn->vn_size -
	     ((vi->vi_type & VI_TYPE_RIGHT_MERGEABLE) ? IH_SIZE : 0))) {
		/* all the contents of S[0] fit into R[0] */

		RFALSE(vn->vn_mode == M_INSERT || vn->vn_mode == M_PASTE,
		       "vs-8080: invalid mode or balance condition failed");

		tb->rnum[h] = vn->vn_nr_item;
		tb->rbytes = -1;
		return;
	}

	d_size = 0, ih_size = IH_SIZE;

	/* last item may be merged with the first item in the right neighbor */
	if (vi->vi_type & VI_TYPE_RIGHT_MERGEABLE)
		d_size = -(int)IH_SIZE, ih_size = 0;

	tb->rnum[0] = 0;
	for (i = vn->vn_nr_item - 1; i >= 0;
	     i--, d_size = 0, ih_size = IH_SIZE, vi--) {
		d_size += vi->vi_item_len;
		if (cur_free >= d_size) {
			/* the item can be shifted entirely */
			cur_free -= d_size;
			tb->rnum[0]++;
			continue;
		}

		/*
		 * check whether R[0] can hold ih and at least one
		 * byte of the item body
		 */

		/* cannot shift even a part of the current item */
		if (cur_free <= ih_size) {
			tb->rbytes = -1;
			return;
		}

		/*
		 * R[0] can hold the header of the item and at least
		 * one byte of its body
		 */
		cur_free -= ih_size;	/* cur_free is still > 0 */

		tb->rbytes = op_check_right(vi, cur_free);
		if (tb->rbytes != -1)
			/* count partially shifted item */
			tb->rnum[0]++;

		break;
	}

	return;
}

/*
 * from       - number of items which are shifted to the left neighbor entirely
 * to         - number of items which are shifted to the right neighbor entirely
 * from_bytes - number of bytes of the boundary item (or directory entries)
 *              which are shifted to the left neighbor
 * to_bytes   - number of bytes of the boundary item (or directory entries)
 *              which are shifted to the right neighbor
 */
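/*
 * Illustrative parameter set (editorial addition, not a real call site):
 * from == 2, from_bytes == 50, to == 1, to_bytes == -1 would mean "items
 * 0 and 1 plus the first 50 units of item 2 have gone to the left
 * neighbor, and the last item has gone to the right neighbor whole";
 * get_num_ver() then reports how many nodes the remaining items need.
 */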
static int get_num_ver(int mode, struct tree_balance *tb, int h,
		       int from, int from_bytes,
		       int to, int to_bytes, short *snum012, int flow)
{
	int i;
	int units;
	struct virtual_node *vn = tb->tb_vn;
	int total_node_size, max_node_size, current_item_size;
	int needed_nodes;

	/* position of item we start filling node from */
	int start_item;

	/* position of item we finish filling node by */
	int end_item;

	/*
	 * number of first bytes (entries for directory) of start_item-th item
	 * we do not include into node that is being filled
	 */
	int start_bytes;

	/*
	 * number of last bytes (entries for directory) of end_item-th item
	 * we do not include into node that is being filled
	 */
	int end_bytes;

	/*
	 * these are the positions in the virtual node of the items that are
	 * split between S[0] and S1new, and between S1new and S2new
	 */
	int split_item_positions[2];

	split_item_positions[0] = -1;
	split_item_positions[1] = -1;

	/*
	 * We only create additional nodes if we are in insert or paste mode
	 * or we are in replace mode at the internal level. If h is 0 and
	 * the mode is M_REPLACE then in fix_nodes we change the mode to
	 * paste or insert before we get here in the code.
	 */
	RFALSE(tb->insert_size[h] < 0 || (mode != M_INSERT && mode != M_PASTE),
	       "vs-8100: insert_size < 0 in overflow");

	max_node_size = MAX_CHILD_SIZE(PATH_H_PBUFFER(tb->tb_path, h));

	/*
	 * snum012 [0-2] - number of items that go into S[0], the first
	 * new node and the second new node
	 */
	snum012[3] = -1;	/* s1bytes */
	snum012[4] = -1;	/* s2bytes */
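
	/*
	 * Sketch of the snum012[] layout (editorial addition): slots 0-2
	 * count whole items placed in S[0], S1new and S2new; slots 3 and 4
	 * end up holding s1bytes and s2bytes, the units of a split item
	 * that land in the first and second new node.
	 */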

	/* internal level */
	if (h > 0) {
		i = ((to - from) * (KEY_SIZE + DC_SIZE) + DC_SIZE);
		if (i == max_node_size)
			return 1;
		return (i / max_node_size + 1);
	}

	/* leaf level */
	needed_nodes = 1;
	total_node_size = 0;

	/* start from 'from'-th item */
	start_item = from;
	/* skip its first 'start_bytes' units */
	start_bytes = ((from_bytes != -1) ? from_bytes : 0);

	/* last included item is the 'end_item'-th one */
	end_item = vn->vn_nr_item - to - 1;
	/* do not count last 'end_bytes' units of 'end_item'-th item */
	end_bytes = (to_bytes != -1) ? to_bytes : 0;

	/*
	 * go through all items beginning with the start_item-th item
	 * and ending with the end_item-th item. Do not count the first
	 * 'start_bytes' units of the 'start_item'-th item and the last
	 * 'end_bytes' of the 'end_item'-th item
	 */
	for (i = start_item; i <= end_item; i++) {
		struct virtual_item *vi = vn->vn_vi + i;
		int skip_from_end = ((i == end_item) ? end_bytes : 0);

		RFALSE(needed_nodes > 3, "vs-8105: too many nodes are needed");

		/* get size of current item */
		current_item_size = vi->vi_item_len;

		/*
		 * do not take in calculation head part (from_bytes)
		 * of from-th item
		 */
		current_item_size -=
		    op_part_size(vi, 0 /*from start */ , start_bytes);

		/* do not take in calculation tail part of last item */
		current_item_size -=
		    op_part_size(vi, 1 /*from end */ , skip_from_end);

		/* if the item fits into the current node entirely */
		if (total_node_size + current_item_size <= max_node_size) {
			snum012[needed_nodes - 1]++;
			total_node_size += current_item_size;
			start_bytes = 0;
			continue;
		}

		/*
		 * virtual item length is longer than the max item size in
		 * a node. This is impossible for a direct item.
		 */
		if (current_item_size > max_node_size) {
			RFALSE(is_direct_le_ih(vi->vi_ih),
			       "vs-8110: "
			       "direct item length is %d. It can not be longer than %d",
			       current_item_size, max_node_size);
			/* we will try to split it */
			flow = 1;
		}

		/* as we do not split items, take new node and continue */
		if (!flow) {
			needed_nodes++;
			i--;
			total_node_size = 0;
			continue;
		}

		/*
		 * calculate number of item units which fit into node being
		 * filled
		 */
		{
			int free_space;

			free_space = max_node_size - total_node_size - IH_SIZE;
			units =
			    op_check_left(vi, free_space, start_bytes,
					  skip_from_end);
			/*
			 * nothing fits into current node, take new
			 * node and continue
			 */
			if (units == -1) {
				needed_nodes++, i--, total_node_size = 0;
				continue;
			}
		}

		/* something fits into the current node */
		start_bytes += units;
		snum012[needed_nodes - 1 + 3] = units;

		if (needed_nodes > 2)
			reiserfs_warning(tb->tb_sb, "vs-8111",
					 "split_item_position is out of range");
		snum012[needed_nodes - 1]++;
		split_item_positions[needed_nodes - 1] = i;
		needed_nodes++;
		/* continue from the same item with start_bytes != -1 */
		start_item = i;
		i--;
		total_node_size = 0;
	}

	/*
	 * snum012[4] (if it is not -1) currently contains the number of
	 * units of the split item that went into S1new, and snum012[3] the
	 * number that went into S0. They are supposed to end up as s1bytes
	 * and s2bytes respectively (snum012[3] == s1bytes,
	 * snum012[4] == s2bytes), so recalculate.
	 */
	if (snum012[4] > 0) {
		int split_item_num;
		int bytes_to_r, bytes_to_l;
		int bytes_to_S1new;

		split_item_num = split_item_positions[1];
		bytes_to_l =
		    ((from == split_item_num
		      && from_bytes != -1) ? from_bytes : 0);
		bytes_to_r =
		    ((end_item == split_item_num
		      && end_bytes != -1) ? end_bytes : 0);
		bytes_to_S1new =
		    ((split_item_positions[0] ==
		      split_item_positions[1]) ? snum012[3] : 0);

		/* s2bytes */
		snum012[4] =
		    op_unit_num(&vn->vn_vi[split_item_num]) - snum012[4] -
		    bytes_to_r - bytes_to_l - bytes_to_S1new;

		if (vn->vn_vi[split_item_num].vi_index != TYPE_DIRENTRY &&
		    vn->vn_vi[split_item_num].vi_index != TYPE_INDIRECT)
			reiserfs_warning(tb->tb_sb, "vs-8115",
					 "not directory or indirect item");
	}

	/* now we know S2bytes, calculate S1bytes */
	if (snum012[3] > 0) {
		int split_item_num;
		int bytes_to_r, bytes_to_l;
		int bytes_to_S2new;

		split_item_num = split_item_positions[0];
		bytes_to_l =
		    ((from == split_item_num
		      && from_bytes != -1) ? from_bytes : 0);
		bytes_to_r =
		    ((end_item == split_item_num
		      && end_bytes != -1) ? end_bytes : 0);
		bytes_to_S2new =
		    ((split_item_positions[0] == split_item_positions[1]
		      && snum012[4] != -1) ? snum012[4] : 0);

		/* s1bytes */
		snum012[3] =
		    op_unit_num(&vn->vn_vi[split_item_num]) - snum012[3] -
		    bytes_to_r - bytes_to_l - bytes_to_S2new;
	}

	return needed_nodes;
}


/*
 * Set parameters for balancing.
 * Performs write of results of analysis of balancing into structure tb,
 * where it will later be used by the functions that actually do the balancing.
 * Parameters:
 *	tb	tree_balance structure;
 *	h	current level of the node;
 *	lnum	number of items from S[h] that must be shifted to L[h];
 *	rnum	number of items from S[h] that must be shifted to R[h];
 *	blk_num	number of blocks that S[h] will be split into;
 *	s012	number of items that fall into the split nodes.
 *	lbytes	number of bytes which flow to the left neighbor from the
 *		item that is not shifted entirely
 *	rbytes	number of bytes which flow to the right neighbor from the
 *		item that is not shifted entirely
 *	s1bytes	number of bytes which flow to the first new node when
 *		S[0] splits (this number is contained in the s012 array)
 */
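/*
 * Illustrative call (editorial addition, hypothetical values):
 * set_parameters(tb, 0, 2, 1, 1, NULL, -1, -1) records "shift two whole
 * items to L[0], one whole item to R[0], keep S[0] as a single block,
 * and split no boundary item".
 */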

static void set_parameters(struct tree_balance *tb, int h, int lnum,
			   int rnum, int blk_num, short *s012, int lb, int rb)
{

	tb->lnum[h] = lnum;
	tb->rnum[h] = rnum;
	tb->blknum[h] = blk_num;

	/* only for leaf level */
	if (h == 0) {
		if (s012 != NULL) {
			tb->s0num = *s012++;
			tb->snum[0] = *s012++;
			tb->snum[1] = *s012++;
			tb->sbytes[0] = *s012++;
			tb->sbytes[1] = *s012;
		}
		tb->lbytes = lb;
		tb->rbytes = rb;
	}
	PROC_INFO_ADD(tb->tb_sb, lnum[h], lnum);
	PROC_INFO_ADD(tb->tb_sb, rnum[h], rnum);

	PROC_INFO_ADD(tb->tb_sb, lbytes[h], lb);
	PROC_INFO_ADD(tb->tb_sb, rbytes[h], rb);
}

/*
 * check if node disappears if we shift tb->lnum[0] items to left
 * neighbor and tb->rnum[0] to the right one.
 */
static int is_leaf_removable(struct tree_balance *tb)
{
	struct virtual_node *vn = tb->tb_vn;
	int to_left, to_right;
	int size;
	int remain_items;

	/*
	 * number of items that will be shifted to left (right) neighbor
	 * entirely
	 */
	to_left = tb->lnum[0] - ((tb->lbytes != -1) ? 1 : 0);
	to_right = tb->rnum[0] - ((tb->rbytes != -1) ? 1 : 0);
	remain_items = vn->vn_nr_item;

	/* how many items remain in S[0] after shiftings to neighbors */
	remain_items -= (to_left + to_right);

	/* all content of node can be shifted to neighbors */
	if (remain_items < 1) {
		set_parameters(tb, 0, to_left, vn->vn_nr_item - to_left, 0,
			       NULL, -1, -1);
		return 1;
	}

	/* S[0] is not removable */
	if (remain_items > 1 || tb->lbytes == -1 || tb->rbytes == -1)
		return 0;

	/* check whether we can divide 1 remaining item between neighbors */

	/* get size of remaining item (in item units) */
	size = op_unit_num(&vn->vn_vi[to_left]);

	if (tb->lbytes + tb->rbytes >= size) {
		set_parameters(tb, 0, to_left + 1, to_right + 1, 0, NULL,
			       tb->lbytes, -1);
		return 1;
	}
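
	/*
	 * Example of the check above (editorial addition): if the single
	 * remaining item has 30 units (say, 30 directory entries) and
	 * tb->lbytes == 12 while tb->rbytes == 20, then 12 + 20 >= 30 and
	 * the item can be split between L[0] and R[0], so S[0] disappears.
	 */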

	return 0;
}

/* check whether L, S, R can be joined in one node */
static int are_leaves_removable(struct tree_balance *tb, int lfree, int rfree)
{
	struct virtual_node *vn = tb->tb_vn;
	int ih_size;
	struct buffer_head *S0;

	S0 = PATH_H_PBUFFER(tb->tb_path, 0);

	ih_size = 0;
	if (vn->vn_nr_item) {
		if (vn->vn_vi[0].vi_type & VI_TYPE_LEFT_MERGEABLE)
			ih_size += IH_SIZE;

		if (vn->vn_vi[vn->vn_nr_item - 1].
		    vi_type & VI_TYPE_RIGHT_MERGEABLE)
			ih_size += IH_SIZE;
	} else {
		/* there was only one item and it will be deleted */
		struct item_head *ih;

		RFALSE(B_NR_ITEMS(S0) != 1,
		       "vs-8125: item number must be 1: it is %d",
		       B_NR_ITEMS(S0));

		ih = item_head(S0, 0);
		if (tb->CFR[0]
		    && !comp_short_le_keys(&ih->ih_key,
					   internal_key(tb->CFR[0],
							tb->rkey[0])))
			/*
			 * The directory must be in a correct state here:
			 * that is, the first directory item must exist
			 * somewhere to the left. The item being deleted
			 * cannot be that first one, because its right
			 * neighbor is an item of the same directory (and
			 * the first item is always deleted last). So the
			 * neighbors of the deleted item can be merged,
			 * and we can save ih_size.
			 */
			if (is_direntry_le_ih(ih)) {
				ih_size = IH_SIZE;

				/*
				 * we might check that left neighbor exists
				 * and is of the same directory
				 */
				RFALSE(le_ih_k_offset(ih) == DOT_OFFSET,
				       "vs-8130: first directory item can not be removed until directory is not empty");
			}

	}

	if (MAX_CHILD_SIZE(S0) + vn->vn_size <= rfree + lfree + ih_size) {
		set_parameters(tb, 0, -1, -1, -1, NULL, -1, -1);
		PROC_INFO_INC(tb->tb_sb, leaves_removable);
		return 1;
	}
	return 0;

}

/* when we do not split item, lnum and rnum are numbers of entire items */
#define SET_PAR_SHIFT_LEFT \
if (h)\
{\
	int to_l;\
	\
	to_l = (MAX_NR_KEY(Sh)+1 - lpar + vn->vn_nr_item + 1) / 2 -\
		(MAX_NR_KEY(Sh) + 1 - lpar);\
	\
	set_parameters (tb, h, to_l, 0, lnver, NULL, -1, -1);\
}\
else \
{\
	if (lset==LEFT_SHIFT_FLOW)\
		set_parameters (tb, h, lpar, 0, lnver, snum012+lset,\
				tb->lbytes, -1);\
	else\
		set_parameters (tb, h, lpar - (tb->lbytes!=-1), 0, lnver, snum012+lset,\
				-1, -1);\
}

#define SET_PAR_SHIFT_RIGHT \
if (h)\
{\
	int to_r;\
	\
	to_r = (MAX_NR_KEY(Sh)+1 - rpar + vn->vn_nr_item + 1) / 2 - (MAX_NR_KEY(Sh) + 1 - rpar);\
	\
	set_parameters (tb, h, 0, to_r, rnver, NULL, -1, -1);\
}\
else \
{\
	if (rset==RIGHT_SHIFT_FLOW)\
		set_parameters (tb, h, 0, rpar, rnver, snum012+rset,\
				-1, tb->rbytes);\
	else\
		set_parameters (tb, h, 0, rpar - (tb->rbytes!=-1), rnver, snum012+rset,\
				-1, -1);\
}
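
/*
 * Rough numeric sketch of the internal-level branch of the two macros
 * above (editorial addition, assuming MAX_NR_KEY(Sh) == 169 as on 4k
 * blocks): with lpar == 150 items acceptable by the neighbor and
 * vn->vn_nr_item == 100, the expression yields
 * to_l = (170 - 150 + 100 + 1) / 2 - (170 - 150) == 60 - 20 == 40;
 * the intent appears to be to move enough items that S[h] and the
 * neighbor end up roughly evenly loaded.
 */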

static void free_buffers_in_tb(struct tree_balance *tb)
{
	int i;

	pathrelse(tb->tb_path);

	for (i = 0; i < MAX_HEIGHT; i++) {
		brelse(tb->L[i]);
		brelse(tb->R[i]);
		brelse(tb->FL[i]);
		brelse(tb->FR[i]);
		brelse(tb->CFL[i]);
		brelse(tb->CFR[i]);

		tb->L[i] = NULL;
		tb->R[i] = NULL;
		tb->FL[i] = NULL;
		tb->FR[i] = NULL;
		tb->CFL[i] = NULL;
		tb->CFR[i] = NULL;
	}
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 819)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 820) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 821) * Get new buffers for storing new nodes that are created while balancing.
 * Returns: REPEAT_SEARCH - schedule occurred while the function worked;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 823) * CARRY_ON - schedule didn't occur while the function worked;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 824) * NO_DISK_SPACE - no disk space.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 825) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 826) /* The function is NOT SCHEDULE-SAFE! */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 827) static int get_empty_nodes(struct tree_balance *tb, int h)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 828) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 829) struct buffer_head *new_bh, *Sh = PATH_H_PBUFFER(tb->tb_path, h);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 830) b_blocknr_t *blocknr, blocknrs[MAX_AMOUNT_NEEDED] = { 0, };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 831) int counter, number_of_freeblk;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 832) int amount_needed; /* number of needed empty blocks */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 833) int retval = CARRY_ON;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 834) struct super_block *sb = tb->tb_sb;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 835)
	/*
	 * number_of_freeblk is the number of empty blocks which have been
	 * acquired for use by the balancing algorithm minus the number of
	 * empty blocks used in the previous levels of the analysis.
	 * number_of_freeblk = tb->cur_blknum can be non-zero if a schedule
	 * occurs after empty blocks are acquired, and the balancing analysis
	 * is then restarted.  amount_needed is the number needed by this
	 * level (h) of the balancing analysis.
	 *
	 * Note that for systems with many processes writing, it would be
	 * more layout optimal to calculate the total number needed by all
	 * levels and then to run reiserfs_new_blocks to get all of them at
	 * once.
	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 850)
	/*
	 * Initialize number_of_freeblk to the amount acquired prior to the
	 * restart of the analysis or 0 if not restarted, then subtract the
	 * amount needed by all of the levels of the tree below h.
	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 856) /* blknum includes S[h], so we subtract 1 in this calculation */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 857) for (counter = 0, number_of_freeblk = tb->cur_blknum;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 858) counter < h; counter++)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 859) number_of_freeblk -=
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 860) (tb->blknum[counter]) ? (tb->blknum[counter] -
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 861) 1) : 0;
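	/*
	 * Worked example (illustrative numbers only): with
	 * tb->cur_blknum == 3 and tb->blknum[0] == 2, tb->blknum[1] == 2
	 * for the levels below h == 2,
	 * number_of_freeblk = 3 - (2 - 1) - (2 - 1) = 1, i.e. one
	 * previously acquired block is still available for this level.
	 */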
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 862)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 863) /* Allocate missing empty blocks. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 864) /* if Sh == 0 then we are getting a new root */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 865) amount_needed = (Sh) ? (tb->blknum[h] - 1) : 1;
	/*
	 * amount_needed = the amount that we need beyond the
	 * amount that we already have.
	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 870) if (amount_needed > number_of_freeblk)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 871) amount_needed -= number_of_freeblk;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 872) else /* If we have enough already then there is nothing to do. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 873) return CARRY_ON;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 874)
	/*
	 * No need to check quota - quota is not allocated for blocks
	 * used for formatted nodes
	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 879) if (reiserfs_new_form_blocknrs(tb, blocknrs,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 880) amount_needed) == NO_DISK_SPACE)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 881) return NO_DISK_SPACE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 882)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 883) /* for each blocknumber we just got, get a buffer and stick it on FEB */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 884) for (blocknr = blocknrs, counter = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 885) counter < amount_needed; blocknr++, counter++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 886)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 887) RFALSE(!*blocknr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 888) "PAP-8135: reiserfs_new_blocknrs failed when got new blocks");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 889)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 890) new_bh = sb_getblk(sb, *blocknr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 891) RFALSE(buffer_dirty(new_bh) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 892) buffer_journaled(new_bh) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 893) buffer_journal_dirty(new_bh),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 894) "PAP-8140: journaled or dirty buffer %b for the new block",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 895) new_bh);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 896)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 897) /* Put empty buffers into the array. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 898) RFALSE(tb->FEB[tb->cur_blknum],
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 899) "PAP-8141: busy slot for new buffer");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 900)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 901) set_buffer_journal_new(new_bh);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 902) tb->FEB[tb->cur_blknum++] = new_bh;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 903) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 904)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 905) if (retval == CARRY_ON && FILESYSTEM_CHANGED_TB(tb))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 906) retval = REPEAT_SEARCH;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 907)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 908) return retval;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 909) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 910)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 911) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 912) * Get free space of the left neighbor, which is stored in the parent
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 913) * node of the left neighbor.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 914) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 915) static int get_lfree(struct tree_balance *tb, int h)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 916) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 917) struct buffer_head *l, *f;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 918) int order;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 919)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 920) if ((f = PATH_H_PPARENT(tb->tb_path, h)) == NULL ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 921) (l = tb->FL[h]) == NULL)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 922) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 923)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 924) if (f == l)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 925) order = PATH_H_B_ITEM_ORDER(tb->tb_path, h) - 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 926) else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 927) order = B_NR_ITEMS(l);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 928) f = l;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 929) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 930)
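	/*
	 * The neighbor itself need not be in memory: its free space is
	 * derived from the parent's disk_child entry as the maximal child
	 * size minus the space the child currently uses (dc_size).
	 */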
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 931) return (MAX_CHILD_SIZE(f) - dc_size(B_N_CHILD(f, order)));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 932) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 933)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 934) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 935) * Get free space of the right neighbor,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 936) * which is stored in the parent node of the right neighbor.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 937) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 938) static int get_rfree(struct tree_balance *tb, int h)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 939) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 940) struct buffer_head *r, *f;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 941) int order;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 942)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 943) if ((f = PATH_H_PPARENT(tb->tb_path, h)) == NULL ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 944) (r = tb->FR[h]) == NULL)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 945) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 946)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 947) if (f == r)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 948) order = PATH_H_B_ITEM_ORDER(tb->tb_path, h) + 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 949) else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 950) order = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 951) f = r;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 952) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 953)
	return (MAX_CHILD_SIZE(f) - dc_size(B_N_CHILD(f, order)));
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 957)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 958) /* Check whether left neighbor is in memory. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 959) static int is_left_neighbor_in_cache(struct tree_balance *tb, int h)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 960) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 961) struct buffer_head *father, *left;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 962) struct super_block *sb = tb->tb_sb;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 963) b_blocknr_t left_neighbor_blocknr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 964) int left_neighbor_position;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 965)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 966) /* Father of the left neighbor does not exist. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 967) if (!tb->FL[h])
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 968) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 969)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 970) /* Calculate father of the node to be balanced. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 971) father = PATH_H_PBUFFER(tb->tb_path, h + 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 972)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 973) RFALSE(!father ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 974) !B_IS_IN_TREE(father) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 975) !B_IS_IN_TREE(tb->FL[h]) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 976) !buffer_uptodate(father) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 977) !buffer_uptodate(tb->FL[h]),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 978) "vs-8165: F[h] (%b) or FL[h] (%b) is invalid",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 979) father, tb->FL[h]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 980)
	/*
	 * Get the position of the pointer to the left neighbor
	 * within its father, FL[h].
	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 985) left_neighbor_position = (father == tb->FL[h]) ?
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 986) tb->lkey[h] : B_NR_ITEMS(tb->FL[h]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 987) /* Get left neighbor block number. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 988) left_neighbor_blocknr =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 989) B_N_CHILD_NUM(tb->FL[h], left_neighbor_position);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 990) /* Look for the left neighbor in the cache. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 991) if ((left = sb_find_get_block(sb, left_neighbor_blocknr))) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 992)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 993) RFALSE(buffer_uptodate(left) && !B_IS_IN_TREE(left),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 994) "vs-8170: left neighbor (%b %z) is not in the tree",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 995) left, left);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 996) put_bh(left);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 997) return 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 998) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 999)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1000) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1001) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1002)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1003) #define LEFT_PARENTS 'l'
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1004) #define RIGHT_PARENTS 'r'
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1005)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1006) static void decrement_key(struct cpu_key *key)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1007) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1008) /* call item specific function for this key */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1009) item_ops[cpu_key_k_type(key)]->decrement_key(key);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1010) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1011)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1012) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1013) * Calculate far left/right parent of the left/right neighbor of the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1014) * current node, that is calculate the left/right (FL[h]/FR[h]) neighbor
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1015) * of the parent F[h].
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1016) * Calculate left/right common parent of the current node and L[h]/R[h].
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1017) * Calculate left/right delimiting key position.
 * Returns: REPEAT_SEARCH - path in the tree is not correct, or a schedule
 *                          occurred while the function worked;
 *          IO_ERROR - I/O error while looking up the neighbor's parent;
 *          CARRY_ON - schedule didn't occur while the function worked.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1022) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1023) static int get_far_parent(struct tree_balance *tb,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1024) int h,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1025) struct buffer_head **pfather,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1026) struct buffer_head **pcom_father, char c_lr_par)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1027) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1028) struct buffer_head *parent;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1029) INITIALIZE_PATH(s_path_to_neighbor_father);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1030) struct treepath *path = tb->tb_path;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1031) struct cpu_key s_lr_father_key;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1032) int counter,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1033) position = INT_MAX,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1034) first_last_position = 0,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1035) path_offset = PATH_H_PATH_OFFSET(path, h);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1036)
	/*
	 * Starting from F[h], go upwards in the tree and look for the
	 * common ancestor of F[h] and of the left/right neighbor that is
	 * to be obtained.
	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1041)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1042) counter = path_offset;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1043)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1044) RFALSE(counter < FIRST_PATH_ELEMENT_OFFSET,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1045) "PAP-8180: invalid path length");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1046)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1047) for (; counter > FIRST_PATH_ELEMENT_OFFSET; counter--) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1048) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1049) * Check whether parent of the current buffer in the path
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1050) * is really parent in the tree.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1051) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1052) if (!B_IS_IN_TREE
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1053) (parent = PATH_OFFSET_PBUFFER(path, counter - 1)))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1054) return REPEAT_SEARCH;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1055)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1056) /* Check whether position in the parent is correct. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1057) if ((position =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1058) PATH_OFFSET_POSITION(path,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1059) counter - 1)) >
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1060) B_NR_ITEMS(parent))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1061) return REPEAT_SEARCH;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1062)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1063) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1064) * Check whether parent at the path really points
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1065) * to the child.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1066) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1067) if (B_N_CHILD_NUM(parent, position) !=
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1068) PATH_OFFSET_PBUFFER(path, counter)->b_blocknr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1069) return REPEAT_SEARCH;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1070)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1071) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1072) * Return delimiting key if position in the parent is not
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1073) * equal to first/last one.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1074) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1075) if (c_lr_par == RIGHT_PARENTS)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1076) first_last_position = B_NR_ITEMS(parent);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1077) if (position != first_last_position) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1078) *pcom_father = parent;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1079) get_bh(*pcom_father);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1081) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1082) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1083) }
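	/*
	 * When the loop breaks out early, 'parent' (grabbed above as
	 * *pcom_father) is the nearest ancestor in which the path does not
	 * descend through the first (for LEFT_PARENTS) or last (for
	 * RIGHT_PARENTS) child, and 'position' is the slot of the child we
	 * came from.  If instead the loop ran down to
	 * FIRST_PATH_ELEMENT_OFFSET, no such ancestor exists on the path;
	 * that case is handled just below.
	 */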
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1084)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1085) /* if we are in the root of the tree, then there is no common father */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1086) if (counter == FIRST_PATH_ELEMENT_OFFSET) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1087) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1088) * Check whether first buffer in the path is the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1089) * root of the tree.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1090) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1091) if (PATH_OFFSET_PBUFFER
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1092) (tb->tb_path,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1093) FIRST_PATH_ELEMENT_OFFSET)->b_blocknr ==
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1094) SB_ROOT_BLOCK(tb->tb_sb)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1095) *pfather = *pcom_father = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1096) return CARRY_ON;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1097) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1098) return REPEAT_SEARCH;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1099) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1100)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1101) RFALSE(B_LEVEL(*pcom_father) <= DISK_LEAF_NODE_LEVEL,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1102) "PAP-8185: (%b %z) level too small",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1103) *pcom_father, *pcom_father);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1104)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1105) /* Check whether the common parent is locked. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1106)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1107) if (buffer_locked(*pcom_father)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1108)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1109) /* Release the write lock while the buffer is busy */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1110) int depth = reiserfs_write_unlock_nested(tb->tb_sb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1111) __wait_on_buffer(*pcom_father);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1112) reiserfs_write_lock_nested(tb->tb_sb, depth);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1113) if (FILESYSTEM_CHANGED_TB(tb)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1114) brelse(*pcom_father);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1115) return REPEAT_SEARCH;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1116) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1117) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1118)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1119) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1120) * So, we got common parent of the current node and its
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1121) * left/right neighbor. Now we are getting the parent of the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1122) * left/right neighbor.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1123) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1124)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1125) /* Form key to get parent of the left/right neighbor. */
	le_key2cpu_key(&s_lr_father_key,
		       internal_key(*pcom_father,
				    (c_lr_par == LEFT_PARENTS) ?
				    (tb->lkey[h - 1] = position - 1) :
				    (tb->rkey[h - 1] = position)));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1134)
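	/*
	 * For the left side the key formed above is the smallest key of
	 * our own subtree; decrementing it below makes search_by_key
	 * descend into the left neighbor's subtree instead of ours.
	 */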
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1135) if (c_lr_par == LEFT_PARENTS)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1136) decrement_key(&s_lr_father_key);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1137)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1138) if (search_by_key
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1139) (tb->tb_sb, &s_lr_father_key, &s_path_to_neighbor_father,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1140) h + 1) == IO_ERROR)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1141) /* path is released */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1142) return IO_ERROR;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1143)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1144) if (FILESYSTEM_CHANGED_TB(tb)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1145) pathrelse(&s_path_to_neighbor_father);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1146) brelse(*pcom_father);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1147) return REPEAT_SEARCH;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1148) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1149)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1150) *pfather = PATH_PLAST_BUFFER(&s_path_to_neighbor_father);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1151)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1152) RFALSE(B_LEVEL(*pfather) != h + 1,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1153) "PAP-8190: (%b %z) level too small", *pfather, *pfather);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1154) RFALSE(s_path_to_neighbor_father.path_length <
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1155) FIRST_PATH_ELEMENT_OFFSET, "PAP-8192: path length is too small");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1156)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1157) s_path_to_neighbor_father.path_length--;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1158) pathrelse(&s_path_to_neighbor_father);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1159) return CARRY_ON;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1160) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1161)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1162) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1163) * Get parents of neighbors of node in the path(S[path_offset]) and
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1164) * common parents of S[path_offset] and L[path_offset]/R[path_offset]:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1165) * F[path_offset], FL[path_offset], FR[path_offset], CFL[path_offset],
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1166) * CFR[path_offset].
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1167) * Calculate numbers of left and right delimiting keys position:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1168) * lkey[path_offset], rkey[path_offset].
 * Returns: REPEAT_SEARCH - schedule occurred while the function worked
 *          IO_ERROR - I/O error while looking up a far parent
 *          CARRY_ON - schedule didn't occur while the function worked
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1171) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1172) static int get_parents(struct tree_balance *tb, int h)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1173) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1174) struct treepath *path = tb->tb_path;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1175) int position,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1176) ret,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1177) path_offset = PATH_H_PATH_OFFSET(tb->tb_path, h);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1178) struct buffer_head *curf, *curcf;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1179)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1180) /* Current node is the root of the tree or will be root of the tree */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1181) if (path_offset <= FIRST_PATH_ELEMENT_OFFSET) {
		/*
		 * The root cannot have parents.
		 * Release nodes which previously were obtained as
		 * parents of the current node's neighbors.
		 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1187) brelse(tb->FL[h]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1188) brelse(tb->CFL[h]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1189) brelse(tb->FR[h]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1190) brelse(tb->CFR[h]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1191) tb->FL[h] = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1192) tb->CFL[h] = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1193) tb->FR[h] = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1194) tb->CFR[h] = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1195) return CARRY_ON;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1196) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1197)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1198) /* Get parent FL[path_offset] of L[path_offset]. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1199) position = PATH_OFFSET_POSITION(path, path_offset - 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1200) if (position) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1201) /* Current node is not the first child of its parent. */
		curf = PATH_OFFSET_PBUFFER(path, path_offset - 1);
		curcf = PATH_OFFSET_PBUFFER(path, path_offset - 1);
		/* curf and curcf are the same buffer; take two references */
		get_bh(curf);
		get_bh(curcf);
		tb->lkey[h] = position - 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1207) } else {
		/*
		 * Calculate current parent of L[path_offset], which is the
		 * left neighbor of the current node. Calculate current
		 * common parent of L[path_offset] and the current node.
		 * Note that CFL[path_offset] is not equal to FL[path_offset]
		 * and CFL[path_offset] is not equal to F[path_offset].
		 * Calculate lkey[path_offset].
		 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1216) if ((ret = get_far_parent(tb, h + 1, &curf,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1217) &curcf,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1218) LEFT_PARENTS)) != CARRY_ON)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1219) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1220) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1221)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1222) brelse(tb->FL[h]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1223) tb->FL[h] = curf; /* New initialization of FL[h]. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1224) brelse(tb->CFL[h]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1225) tb->CFL[h] = curcf; /* New initialization of CFL[h]. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1226)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1227) RFALSE((curf && !B_IS_IN_TREE(curf)) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1228) (curcf && !B_IS_IN_TREE(curcf)),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1229) "PAP-8195: FL (%b) or CFL (%b) is invalid", curf, curcf);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1230)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1231) /* Get parent FR[h] of R[h]. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1232)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1233) /* Current node is the last child of F[h]. FR[h] != F[h]. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1234) if (position == B_NR_ITEMS(PATH_H_PBUFFER(path, h + 1))) {
		/*
		 * Calculate current parent of R[h], which is the right
		 * neighbor of F[h]. Calculate current common parent of
		 * R[h] and current node. Note that CFR[h] is not equal to
		 * FR[path_offset] and CFR[h] is not equal to F[h].
		 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1241) if ((ret =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1242) get_far_parent(tb, h + 1, &curf, &curcf,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1243) RIGHT_PARENTS)) != CARRY_ON)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1244) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1245) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1246) /* Current node is not the last child of its parent F[h]. */
		curf = PATH_OFFSET_PBUFFER(path, path_offset - 1);
		curcf = PATH_OFFSET_PBUFFER(path, path_offset - 1);
		/* curf and curcf are the same buffer; take two references */
		get_bh(curf);
		get_bh(curcf);
		tb->rkey[h] = position;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1252) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1253)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1254) brelse(tb->FR[h]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1255) /* New initialization of FR[path_offset]. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1256) tb->FR[h] = curf;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1257)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1258) brelse(tb->CFR[h]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1259) /* New initialization of CFR[path_offset]. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1260) tb->CFR[h] = curcf;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1261)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1262) RFALSE((curf && !B_IS_IN_TREE(curf)) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1263) (curcf && !B_IS_IN_TREE(curcf)),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1264) "PAP-8205: FR (%b) or CFR (%b) is invalid", curf, curcf);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1265)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1266) return CARRY_ON;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1267) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1268)
/*
 * it is possible to remove a node as a result of shifting to
 * neighbors even when we insert or paste an item.
 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1273) static inline int can_node_be_removed(int mode, int lfree, int sfree, int rfree,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1274) struct tree_balance *tb, int h)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1275) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1276) struct buffer_head *Sh = PATH_H_PBUFFER(tb->tb_path, h);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1277) int levbytes = tb->insert_size[h];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1278) struct item_head *ih;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1279) struct reiserfs_key *r_key = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1280)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1281) ih = item_head(Sh, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1282) if (tb->CFR[h])
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1283) r_key = internal_key(tb->CFR[h], tb->rkey[h]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1284)
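	/*
	 * S[h] can be emptied into its neighbors only when lfree + rfree +
	 * sfree covers MAX_CHILD_SIZE(Sh) + levbytes, minus the item
	 * headers saved by merging the boundary items at the leaf level,
	 * plus KEY_SIZE at internal levels (presumably for the delimiting
	 * key that has to move down from the parent).  The "if" below
	 * tests the negation of this, i.e. the case where S[h] cannot be
	 * removed.
	 */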
	if (lfree + rfree + sfree <
	    MAX_CHILD_SIZE(Sh) + levbytes
	    /* shifting may merge items which might save space */
	    - ((!h && op_is_left_mergeable(&ih->ih_key, Sh->b_size)) ?
	       IH_SIZE : 0)
	    - ((!h && r_key && op_is_left_mergeable(r_key, Sh->b_size)) ?
	       IH_SIZE : 0)
	    + ((h) ? KEY_SIZE : 0)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1294) /* node can not be removed */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1295) if (sfree >= levbytes) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1296) /* new item fits into node S[h] without any shifting */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1297) if (!h)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1298) tb->s0num =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1299) B_NR_ITEMS(Sh) +
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1300) ((mode == M_INSERT) ? 1 : 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1301) set_parameters(tb, h, 0, 0, 1, NULL, -1, -1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1302) return NO_BALANCING_NEEDED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1303) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1304) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1305) PROC_INFO_INC(tb->tb_sb, can_node_be_removed[h]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1306) return !NO_BALANCING_NEEDED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1307) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1308)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1309) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1310) * Check whether current node S[h] is balanced when increasing its size by
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1311) * Inserting or Pasting.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1312) * Calculate parameters for balancing for current level h.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1313) * Parameters:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1314) * tb tree_balance structure;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1315) * h current level of the node;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1316) * inum item number in S[h];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1317) * mode i - insert, p - paste;
 * Returns:	REPEAT_SEARCH - schedule occurred;
 *		CARRY_ON - balancing for higher levels needed;
 *		NO_BALANCING_NEEDED - no balancing for higher levels needed;
 *		NO_DISK_SPACE - no disk space.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1322) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1323) /* ip means Inserting or Pasting */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1324) static int ip_check_balance(struct tree_balance *tb, int h)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1325) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1326) struct virtual_node *vn = tb->tb_vn;
	/*
	 * Number of bytes that must be inserted into (value is negative
	 * if bytes are deleted) the buffer which contains the node being
	 * balanced.  The mnemonic is that the attempted change in the
	 * space used at this level of the node is levbytes bytes.
	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1333) int levbytes;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1334) int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1335)
	int lfree, sfree, rfree;	/* free space in L, S and R */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1337)
	/*
	 * nver is short for number of vertices, and lnver is the number if
	 * we shift to the left, rnver is the number if we shift to the
	 * right, and lrnver is the number if we shift in both directions.
	 * The goal is to minimize first the number of vertices, and second,
	 * the number of vertices whose contents are changed by shifting,
	 * and third the number of uncached vertices whose contents are
	 * changed by shifting and must be read from disk.
	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1347) int nver, lnver, rnver, lrnver;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1348)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1349) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1350) * used at leaf level only, S0 = S[0] is the node being balanced,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1351) * sInum [ I = 0,1,2 ] is the number of items that will
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1352) * remain in node SI after balancing. S1 and S2 are new
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1353) * nodes that might be created.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1354) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1355)
	/*
	 * we perform 8 calls to get_num_ver().  For each call we calculate
	 * five parameters, of which the 4th is s1bytes and the 5th is
	 * s2bytes.
	 *
	 * s0num, s1num, s2num for 8 cases
	 * 0,1 - do not shift and do not shift but bottle
	 * 2 - shift only whole item to left
	 * 3 - shift to left and bottle as much as possible
	 * 4,5 - shift to right (whole items and as much as possible)
	 * 6,7 - shift to both directions (whole items and as much as possible)
	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1368) short snum012[40] = { 0, };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1369)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1370) /* Sh is the node whose balance is currently being checked */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1371) struct buffer_head *Sh;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1372)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1373) Sh = PATH_H_PBUFFER(tb->tb_path, h);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1374) levbytes = tb->insert_size[h];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1375)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1376) /* Calculate balance parameters for creating new root. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1377) if (!Sh) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1378) if (!h)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1379) reiserfs_panic(tb->tb_sb, "vs-8210",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1380) "S[0] can not be 0");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1381) switch (ret = get_empty_nodes(tb, h)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1382) /* no balancing for higher levels needed */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1383) case CARRY_ON:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1384) set_parameters(tb, h, 0, 0, 1, NULL, -1, -1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1385) return NO_BALANCING_NEEDED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1386)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1387) case NO_DISK_SPACE:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1388) case REPEAT_SEARCH:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1389) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1390) default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1391) reiserfs_panic(tb->tb_sb, "vs-8215", "incorrect "
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1392) "return value of get_empty_nodes");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1393) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1394) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1395)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1396) /* get parents of S[h] neighbors. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1397) ret = get_parents(tb, h);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1398) if (ret != CARRY_ON)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1399) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1400)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1401) sfree = B_FREE_SPACE(Sh);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1402)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1403) /* get free space of neighbors */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1404) rfree = get_rfree(tb, h);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1405) lfree = get_lfree(tb, h);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1406)
	/*
	 * if S[h] cannot be emptied into its neighbors and the new item
	 * fits into it without any shifting, no balancing is needed
	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1408) if (can_node_be_removed(vn->vn_mode, lfree, sfree, rfree, tb, h) ==
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1409) NO_BALANCING_NEEDED)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1410) return NO_BALANCING_NEEDED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1411)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1412) create_virtual_node(tb, h);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1413)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1414) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1415) * determine maximal number of items we can shift to the left
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1416) * neighbor (in tb structure) and the maximal number of bytes
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1417) * that can flow to the left neighbor from the left most liquid
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1418) * item that cannot be shifted from S[0] entirely (returned value)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1419) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1420) check_left(tb, h, lfree);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1421)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1422) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1423) * determine maximal number of items we can shift to the right
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1424) * neighbor (in tb structure) and the maximal number of bytes
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1425) * that can flow to the right neighbor from the right most liquid
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1426) * item that cannot be shifted from S[0] entirely (returned value)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1427) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1428) check_right(tb, h, rfree);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1429)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1430) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1431) * all contents of internal node S[h] can be moved into its
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1432) * neighbors, S[h] will be removed after balancing
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1433) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1434) if (h && (tb->rnum[h] + tb->lnum[h] >= vn->vn_nr_item + 1)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1435) int to_r;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1436)
		/*
		 * Since we are working on internal nodes, and our internal
		 * nodes have fixed size entries, we can balance by the
		 * number of items rather than the space they consume. In
		 * this routine we set the left node equal to the right
		 * node, allowing a difference of less than or equal to 1
		 * child pointer.
		 */
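		/*
		 * Illustrative numbers (assumed, not from a real tree):
		 * with MAX_NR_KEY(Sh) == 7, lnum[h] == 5, rnum[h] == 4 and
		 * vn->vn_nr_item == 6,
		 * to_r = (14 + 2 - 5 - 4 + 6 + 1) / 2 - (7 + 1 - 4)
		 *      = 7 - 4 = 3,
		 * so 3 child pointers go right and the remaining
		 * 7 - 3 = 4 go left, leaving the two halves within one
		 * child pointer of each other.
		 */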
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1445) to_r =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1446) ((MAX_NR_KEY(Sh) << 1) + 2 - tb->lnum[h] - tb->rnum[h] +
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1447) vn->vn_nr_item + 1) / 2 - (MAX_NR_KEY(Sh) + 1 -
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1448) tb->rnum[h]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1449) set_parameters(tb, h, vn->vn_nr_item + 1 - to_r, to_r, 0, NULL,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1450) -1, -1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1451) return CARRY_ON;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1452) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1453)
	/*
	 * this checks the balance condition that no two neighboring nodes
	 * may fit into one node
	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1458) RFALSE(h &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1459) (tb->lnum[h] >= vn->vn_nr_item + 1 ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1460) tb->rnum[h] >= vn->vn_nr_item + 1),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1461) "vs-8220: tree is not balanced on internal level");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1462) RFALSE(!h && ((tb->lnum[h] >= vn->vn_nr_item && (tb->lbytes == -1)) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1463) (tb->rnum[h] >= vn->vn_nr_item && (tb->rbytes == -1))),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1464) "vs-8225: tree is not balanced on leaf level");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1465)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1466) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1467) * all contents of S[0] can be moved into its neighbors
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1468) * S[0] will be removed after balancing.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1469) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1470) if (!h && is_leaf_removable(tb))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1471) return CARRY_ON;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1472)
	/*
	 * why do we perform this check here rather than earlier?
	 * Answer: we can win 1 node in some cases above.  Moreover, we
	 * already checked it above, when we established that S[0] is not
	 * removable in principle.
	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1479)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1480) /* new item fits into node S[h] without any shifting */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1481) if (sfree >= levbytes) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1482) if (!h)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1483) tb->s0num = vn->vn_nr_item;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1484) set_parameters(tb, h, 0, 0, 1, NULL, -1, -1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1485) return NO_BALANCING_NEEDED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1486) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1487)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1488) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1489) int lpar, rpar, nset, lset, rset, lrset;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1490) /* regular overflowing of the node */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1491)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1492) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1493) * get_num_ver works in 2 modes (FLOW & NO_FLOW)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1494) * lpar, rpar - number of items we can shift to left/right
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1495) * neighbor (including splitting item)
		 * nset, lset, rset, lrset - show whether flowing items
		 * give better packing
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1498) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1499) #define FLOW 1
#define NO_FLOW	0		/* do not do any splitting */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1501)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1502) /* we choose one of the following */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1503) #define NOTHING_SHIFT_NO_FLOW 0
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1504) #define NOTHING_SHIFT_FLOW 5
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1505) #define LEFT_SHIFT_NO_FLOW 10
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1506) #define LEFT_SHIFT_FLOW 15
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1507) #define RIGHT_SHIFT_NO_FLOW 20
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1508) #define RIGHT_SHIFT_FLOW 25
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1509) #define LR_SHIFT_NO_FLOW 30
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1510) #define LR_SHIFT_FLOW 35
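		/*
		 * Each of the defines above is an offset into snum012[]:
		 * every strategy occupies five consecutive slots holding
		 * s0num, s1num, s2num, s1bytes and s2bytes (see the
		 * comment above snum012), which is why the offsets step
		 * by 5 and the array holds 8 * 5 = 40 entries.
		 */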
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1511)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1512) lpar = tb->lnum[h];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1513) rpar = tb->rnum[h];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1514)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1515) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1516) * calculate number of blocks S[h] must be split into when
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1517) * nothing is shifted to the neighbors, as well as number of
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1518) * items in each part of the split node (s012 numbers),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1519) * and number of bytes (s1bytes) of the shared drop which
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1520) * flow to S1 if any
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1521) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1522) nset = NOTHING_SHIFT_NO_FLOW;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1523) nver = get_num_ver(vn->vn_mode, tb, h,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1524) 0, -1, h ? vn->vn_nr_item : 0, -1,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1525) snum012, NO_FLOW);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1526)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1527) if (!h) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1528) int nver1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1529)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1530) /*
			 * note that in this case we try to bottle
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1532) * between S[0] and S1 (S1 - the first new node)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1533) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1534) nver1 = get_num_ver(vn->vn_mode, tb, h,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1535) 0, -1, 0, -1,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1536) snum012 + NOTHING_SHIFT_FLOW, FLOW);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1537) if (nver > nver1)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1538) nset = NOTHING_SHIFT_FLOW, nver = nver1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1539) }
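		/*
		 * The same pattern repeats below for the left, right and
		 * two-sided shifts: first compute the node count without
		 * splitting an item (NO_FLOW), then, at the leaf level
		 * only, recompute allowing a partial item to flow (FLOW)
		 * and keep whichever strategy needs fewer nodes.
		 */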
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1540)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1541) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1542) * calculate number of blocks S[h] must be split into when
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1543) * l_shift_num first items and l_shift_bytes of the right
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1544) * most liquid item to be shifted are shifted to the left
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1545) * neighbor, as well as number of items in each part of the
		 * split node (s012 numbers), and number of bytes
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1547) * (s1bytes) of the shared drop which flow to S1 if any
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1548) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1549) lset = LEFT_SHIFT_NO_FLOW;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1550) lnver = get_num_ver(vn->vn_mode, tb, h,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1551) lpar - ((h || tb->lbytes == -1) ? 0 : 1),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1552) -1, h ? vn->vn_nr_item : 0, -1,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1553) snum012 + LEFT_SHIFT_NO_FLOW, NO_FLOW);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1554) if (!h) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1555) int lnver1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1556)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1557) lnver1 = get_num_ver(vn->vn_mode, tb, h,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1558) lpar -
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1559) ((tb->lbytes != -1) ? 1 : 0),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1560) tb->lbytes, 0, -1,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1561) snum012 + LEFT_SHIFT_FLOW, FLOW);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1562) if (lnver > lnver1)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1563) lset = LEFT_SHIFT_FLOW, lnver = lnver1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1564) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1565)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1566) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1567) * calculate number of blocks S[h] must be split into when
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1568) * r_shift_num first items and r_shift_bytes of the left most
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1569) * liquid item to be shifted are shifted to the right neighbor,
		 * as well as number of items in each part of the split
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1571) * node (s012 numbers), and number of bytes (s1bytes) of the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1572) * shared drop which flow to S1 if any
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1573) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1574) rset = RIGHT_SHIFT_NO_FLOW;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1575) rnver = get_num_ver(vn->vn_mode, tb, h,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1576) 0, -1,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1577) h ? (vn->vn_nr_item - rpar) : (rpar -
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1578) ((tb->
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1579) rbytes !=
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1580) -1) ? 1 :
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1581) 0)), -1,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1582) snum012 + RIGHT_SHIFT_NO_FLOW, NO_FLOW);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1583) if (!h) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1584) int rnver1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1585)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1586) rnver1 = get_num_ver(vn->vn_mode, tb, h,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1587) 0, -1,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1588) (rpar -
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1589) ((tb->rbytes != -1) ? 1 : 0)),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1590) tb->rbytes,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1591) snum012 + RIGHT_SHIFT_FLOW, FLOW);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1592)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1593) if (rnver > rnver1)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1594) rset = RIGHT_SHIFT_FLOW, rnver = rnver1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1595) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1596)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1597) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1598) * calculate number of blocks S[h] must be split into when
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1599) * items are shifted in both directions, as well as number
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1600) * of items in each part of the splitted node (s012 numbers),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1601) * and number of bytes (s1bytes) of the shared drop which
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1602) * flow to S1 if any
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1603) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1604) lrset = LR_SHIFT_NO_FLOW;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1605) lrnver = get_num_ver(vn->vn_mode, tb, h,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1606) lpar - ((h || tb->lbytes == -1) ? 0 : 1),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1607) -1,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1608) h ? (vn->vn_nr_item - rpar) : (rpar -
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1609) ((tb->
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1610) rbytes !=
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1611) -1) ? 1 :
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1612) 0)), -1,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1613) snum012 + LR_SHIFT_NO_FLOW, NO_FLOW);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1614) if (!h) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1615) int lrnver1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1616)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1617) lrnver1 = get_num_ver(vn->vn_mode, tb, h,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1618) lpar -
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1619) ((tb->lbytes != -1) ? 1 : 0),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1620) tb->lbytes,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1621) (rpar -
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1622) ((tb->rbytes != -1) ? 1 : 0)),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1623) tb->rbytes,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1624) snum012 + LR_SHIFT_FLOW, FLOW);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1625) if (lrnver > lrnver1)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1626) lrset = LR_SHIFT_FLOW, lrnver = lrnver1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1627) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1628)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1629) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1630) * Our general shifting strategy is:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1631) * 1) to minimized number of new nodes;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1632) * 2) to minimized number of neighbors involved in shifting;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1633) * 3) to minimized number of disk reads;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1634) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1635)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1636) /* we can win TWO or ONE nodes by shifting in both directions */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1637) if (lrnver < lnver && lrnver < rnver) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1638) RFALSE(h &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1639) (tb->lnum[h] != 1 ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1640) tb->rnum[h] != 1 ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1641) lrnver != 1 || rnver != 2 || lnver != 2
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1642) || h != 1), "vs-8230: bad h");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1643) if (lrset == LR_SHIFT_FLOW)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1644) set_parameters(tb, h, tb->lnum[h], tb->rnum[h],
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1645) lrnver, snum012 + lrset,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1646) tb->lbytes, tb->rbytes);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1647) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1648) set_parameters(tb, h,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1649) tb->lnum[h] -
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1650) ((tb->lbytes == -1) ? 0 : 1),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1651) tb->rnum[h] -
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1652) ((tb->rbytes == -1) ? 0 : 1),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1653) lrnver, snum012 + lrset, -1, -1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1654)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1655) return CARRY_ON;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1656) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1657)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1658) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1659) * if shifting doesn't lead to better packing
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1660) * then don't shift
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1661) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1662) if (nver == lrnver) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1663) set_parameters(tb, h, 0, 0, nver, snum012 + nset, -1,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1664) -1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1665) return CARRY_ON;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1666) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1667)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1668) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1669) * now we know that for better packing shifting in only one
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1670) * direction either to the left or to the right is required
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1671) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1672)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1673) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1674) * if shifting to the left is better than
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1675) * shifting to the right
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1676) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1677) if (lnver < rnver) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1678) SET_PAR_SHIFT_LEFT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1679) return CARRY_ON;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1680) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1681)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1682) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1683) * if shifting to the right is better than
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1684) * shifting to the left
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1685) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1686) if (lnver > rnver) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1687) SET_PAR_SHIFT_RIGHT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1688) return CARRY_ON;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1689) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1690)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1691) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1692) * now shifting in either direction gives the same number
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1693) * of nodes and we can make use of the cached neighbors
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1694) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1695) if (is_left_neighbor_in_cache(tb, h)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1696) SET_PAR_SHIFT_LEFT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1697) return CARRY_ON;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1698) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1699)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1700) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1701) * shift to the right independently on whether the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1702) * right neighbor in cache or not
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1703) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1704) SET_PAR_SHIFT_RIGHT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1705) return CARRY_ON;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1706) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1707) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1708)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1709) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1710) * Check whether current node S[h] is balanced when Decreasing its size by
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1711) * Deleting or Cutting for INTERNAL node of S+tree.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1712) * Calculate parameters for balancing for current level h.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1713) * Parameters:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1714) * tb tree_balance structure;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1715) * h current level of the node;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1716) * inum item number in S[h];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1717) * mode i - insert, p - paste;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1718) * Returns: 1 - schedule occurred;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1719) * 0 - balancing for higher levels needed;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1720) * -1 - no balancing for higher levels needed;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1721) * -2 - no disk space.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1722) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1723) * Note: Items of internal nodes have fixed size, so the balance condition for
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1724) * the internal part of S+tree is as for the B-trees.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1725) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1726) static int dc_check_balance_internal(struct tree_balance *tb, int h)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1727) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1728) struct virtual_node *vn = tb->tb_vn;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1729)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1730) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1731) * Sh is the node whose balance is currently being checked,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1732) * and Fh is its father.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1733) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1734) struct buffer_head *Sh, *Fh;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1735) int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1736) int lfree, rfree /* free space in L and R */ ;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1737)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1738) Sh = PATH_H_PBUFFER(tb->tb_path, h);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1739) Fh = PATH_H_PPARENT(tb->tb_path, h);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1740)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1741) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1742) * using tb->insert_size[h], which is negative in this case,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1743) * create_virtual_node calculates:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1744) * new_nr_item = number of items node would have if operation is
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1745) * performed without balancing (new_nr_item);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1746) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1747) create_virtual_node(tb, h);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1748)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1749) if (!Fh) { /* S[h] is the root. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1750) /* no balancing for higher levels needed */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1751) if (vn->vn_nr_item > 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1752) set_parameters(tb, h, 0, 0, 1, NULL, -1, -1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1753) return NO_BALANCING_NEEDED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1754) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1755) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1756) * new_nr_item == 0.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1757) * Current root will be deleted resulting in
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1758) * decrementing the tree height.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1759) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1760) set_parameters(tb, h, 0, 0, 0, NULL, -1, -1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1761) return CARRY_ON;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1762) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1763)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1764) if ((ret = get_parents(tb, h)) != CARRY_ON)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1765) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1766)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1767) /* get free space of neighbors */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1768) rfree = get_rfree(tb, h);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1769) lfree = get_lfree(tb, h);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1770)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1771) /* determine maximal number of items we can fit into neighbors */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1772) check_left(tb, h, lfree);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1773) check_right(tb, h, rfree);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1774)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1775) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1776) * Balance condition for the internal node is valid.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1777) * In this case we balance only if it leads to better packing.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1778) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1779) if (vn->vn_nr_item >= MIN_NR_KEY(Sh)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1780) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1781) * Here we join S[h] with one of its neighbors,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1782) * which is impossible with greater values of new_nr_item.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1783) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1784) if (vn->vn_nr_item == MIN_NR_KEY(Sh)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1785) /* All contents of S[h] can be moved to L[h]. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1786) if (tb->lnum[h] >= vn->vn_nr_item + 1) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1787) int n;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1788) int order_L;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1789)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1790) order_L =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1791) ((n =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1792) PATH_H_B_ITEM_ORDER(tb->tb_path,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1793) h)) ==
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1794) 0) ? B_NR_ITEMS(tb->FL[h]) : n - 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1795) n = dc_size(B_N_CHILD(tb->FL[h], order_L)) /
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1796) (DC_SIZE + KEY_SIZE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1797) set_parameters(tb, h, -n - 1, 0, 0, NULL, -1,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1798) -1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1799) return CARRY_ON;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1800) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1801)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1802) /* All contents of S[h] can be moved to R[h]. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1803) if (tb->rnum[h] >= vn->vn_nr_item + 1) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1804) int n;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1805) int order_R;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1806)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1807) order_R =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1808) ((n =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1809) PATH_H_B_ITEM_ORDER(tb->tb_path,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1810) h)) ==
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1811) B_NR_ITEMS(Fh)) ? 0 : n + 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1812) n = dc_size(B_N_CHILD(tb->FR[h], order_R)) /
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1813) (DC_SIZE + KEY_SIZE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1814) set_parameters(tb, h, 0, -n - 1, 0, NULL, -1,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1815) -1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1816) return CARRY_ON;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1817) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1818) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1819)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1820) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1821) * All contents of S[h] can be moved to the neighbors
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1822) * (L[h] & R[h]).
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1823) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1824) if (tb->rnum[h] + tb->lnum[h] >= vn->vn_nr_item + 1) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1825) int to_r;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1826)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1827) to_r =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1828) ((MAX_NR_KEY(Sh) << 1) + 2 - tb->lnum[h] -
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1829) tb->rnum[h] + vn->vn_nr_item + 1) / 2 -
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1830) (MAX_NR_KEY(Sh) + 1 - tb->rnum[h]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1831) set_parameters(tb, h, vn->vn_nr_item + 1 - to_r, to_r,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1832) 0, NULL, -1, -1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1833) return CARRY_ON;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1834) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1835)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1836) /* Balancing does not lead to better packing. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1837) set_parameters(tb, h, 0, 0, 1, NULL, -1, -1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1838) return NO_BALANCING_NEEDED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1839) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1840)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1841) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1842) * Current node contain insufficient number of items.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1843) * Balancing is required.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1844) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1845) /* Check whether we can merge S[h] with left neighbor. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1846) if (tb->lnum[h] >= vn->vn_nr_item + 1)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1847) if (is_left_neighbor_in_cache(tb, h)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1848) || tb->rnum[h] < vn->vn_nr_item + 1 || !tb->FR[h]) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1849) int n;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1850) int order_L;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1851)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1852) order_L =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1853) ((n =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1854) PATH_H_B_ITEM_ORDER(tb->tb_path,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1855) h)) ==
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1856) 0) ? B_NR_ITEMS(tb->FL[h]) : n - 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1857) n = dc_size(B_N_CHILD(tb->FL[h], order_L)) / (DC_SIZE +
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1858) KEY_SIZE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1859) set_parameters(tb, h, -n - 1, 0, 0, NULL, -1, -1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1860) return CARRY_ON;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1861) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1862)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1863) /* Check whether we can merge S[h] with right neighbor. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1864) if (tb->rnum[h] >= vn->vn_nr_item + 1) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1865) int n;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1866) int order_R;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1867)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1868) order_R =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1869) ((n =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1870) PATH_H_B_ITEM_ORDER(tb->tb_path,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1871) h)) == B_NR_ITEMS(Fh)) ? 0 : (n + 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1872) n = dc_size(B_N_CHILD(tb->FR[h], order_R)) / (DC_SIZE +
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1873) KEY_SIZE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1874) set_parameters(tb, h, 0, -n - 1, 0, NULL, -1, -1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1875) return CARRY_ON;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1876) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1877)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1878) /* All contents of S[h] can be moved to the neighbors (L[h] & R[h]). */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1879) if (tb->rnum[h] + tb->lnum[h] >= vn->vn_nr_item + 1) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1880) int to_r;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1881)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1882) to_r =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1883) ((MAX_NR_KEY(Sh) << 1) + 2 - tb->lnum[h] - tb->rnum[h] +
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1884) vn->vn_nr_item + 1) / 2 - (MAX_NR_KEY(Sh) + 1 -
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1885) tb->rnum[h]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1886) set_parameters(tb, h, vn->vn_nr_item + 1 - to_r, to_r, 0, NULL,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1887) -1, -1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1888) return CARRY_ON;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1889) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1890)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1891) /* For internal nodes try to borrow item from a neighbor */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1892) RFALSE(!tb->FL[h] && !tb->FR[h], "vs-8235: trying to borrow for root");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1893)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1894) /* Borrow one or two items from caching neighbor */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1895) if (is_left_neighbor_in_cache(tb, h) || !tb->FR[h]) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1896) int from_l;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1897)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1898) from_l =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1899) (MAX_NR_KEY(Sh) + 1 - tb->lnum[h] + vn->vn_nr_item +
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1900) 1) / 2 - (vn->vn_nr_item + 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1901) set_parameters(tb, h, -from_l, 0, 1, NULL, -1, -1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1902) return CARRY_ON;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1903) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1904)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1905) set_parameters(tb, h, 0,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1906) -((MAX_NR_KEY(Sh) + 1 - tb->rnum[h] + vn->vn_nr_item +
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1907) 1) / 2 - (vn->vn_nr_item + 1)), 1, NULL, -1, -1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1908) return CARRY_ON;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1909) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1910)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1911) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1912) * Check whether current node S[h] is balanced when Decreasing its size by
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1913) * Deleting or Truncating for LEAF node of S+tree.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1914) * Calculate parameters for balancing for current level h.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1915) * Parameters:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1916) * tb tree_balance structure;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1917) * h current level of the node;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1918) * inum item number in S[h];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1919) * mode i - insert, p - paste;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1920) * Returns: 1 - schedule occurred;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1921) * 0 - balancing for higher levels needed;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1922) * -1 - no balancing for higher levels needed;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1923) * -2 - no disk space.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1924) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1925) static int dc_check_balance_leaf(struct tree_balance *tb, int h)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1926) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1927) struct virtual_node *vn = tb->tb_vn;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1928)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1929) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1930) * Number of bytes that must be deleted from
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1931) * (value is negative if bytes are deleted) buffer which
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1932) * contains node being balanced. The mnemonic is that the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1933) * attempted change in node space used level is levbytes bytes.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1934) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1935) int levbytes;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1936)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1937) /* the maximal item size */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1938) int maxsize, ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1939)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1940) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1941) * S0 is the node whose balance is currently being checked,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1942) * and F0 is its father.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1943) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1944) struct buffer_head *S0, *F0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1945) int lfree, rfree /* free space in L and R */ ;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1946)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1947) S0 = PATH_H_PBUFFER(tb->tb_path, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1948) F0 = PATH_H_PPARENT(tb->tb_path, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1949)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1950) levbytes = tb->insert_size[h];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1951)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1952) maxsize = MAX_CHILD_SIZE(S0); /* maximal possible size of an item */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1953)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1954) if (!F0) { /* S[0] is the root now. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1955)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1956) RFALSE(-levbytes >= maxsize - B_FREE_SPACE(S0),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1957) "vs-8240: attempt to create empty buffer tree");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1958)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1959) set_parameters(tb, h, 0, 0, 1, NULL, -1, -1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1960) return NO_BALANCING_NEEDED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1961) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1962)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1963) if ((ret = get_parents(tb, h)) != CARRY_ON)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1964) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1965)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1966) /* get free space of neighbors */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1967) rfree = get_rfree(tb, h);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1968) lfree = get_lfree(tb, h);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1969)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1970) create_virtual_node(tb, h);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1971)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1972) /* if 3 leaves can be merge to one, set parameters and return */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1973) if (are_leaves_removable(tb, lfree, rfree))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1974) return CARRY_ON;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1975)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1976) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1977) * determine maximal number of items we can shift to the left/right
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1978) * neighbor and the maximal number of bytes that can flow to the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1979) * left/right neighbor from the left/right most liquid item that
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1980) * cannot be shifted from S[0] entirely
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1981) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1982) check_left(tb, h, lfree);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1983) check_right(tb, h, rfree);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1984)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1985) /* check whether we can merge S with left neighbor. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1986) if (tb->lnum[0] >= vn->vn_nr_item && tb->lbytes == -1)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1987) if (is_left_neighbor_in_cache(tb, h) || ((tb->rnum[0] - ((tb->rbytes == -1) ? 0 : 1)) < vn->vn_nr_item) || /* S can not be merged with R */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1988) !tb->FR[h]) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1989)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1990) RFALSE(!tb->FL[h],
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1991) "vs-8245: dc_check_balance_leaf: FL[h] must exist");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1992)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1993) /* set parameter to merge S[0] with its left neighbor */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1994) set_parameters(tb, h, -1, 0, 0, NULL, -1, -1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1995) return CARRY_ON;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1996) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1997)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1998) /* check whether we can merge S[0] with right neighbor. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1999) if (tb->rnum[0] >= vn->vn_nr_item && tb->rbytes == -1) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2000) set_parameters(tb, h, 0, -1, 0, NULL, -1, -1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2001) return CARRY_ON;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2002) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2003)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2004) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2005) * All contents of S[0] can be moved to the neighbors (L[0] & R[0]).
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2006) * Set parameters and return
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2007) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2008) if (is_leaf_removable(tb))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2009) return CARRY_ON;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2010)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2011) /* Balancing is not required. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2012) tb->s0num = vn->vn_nr_item;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2013) set_parameters(tb, h, 0, 0, 1, NULL, -1, -1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2014) return NO_BALANCING_NEEDED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2015) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2016)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2017) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2018) * Check whether current node S[h] is balanced when Decreasing its size by
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2019) * Deleting or Cutting.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2020) * Calculate parameters for balancing for current level h.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2021) * Parameters:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2022) * tb tree_balance structure;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2023) * h current level of the node;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2024) * inum item number in S[h];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2025) * mode d - delete, c - cut.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2026) * Returns: 1 - schedule occurred;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2027) * 0 - balancing for higher levels needed;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2028) * -1 - no balancing for higher levels needed;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2029) * -2 - no disk space.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2030) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2031) static int dc_check_balance(struct tree_balance *tb, int h)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2032) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2033) RFALSE(!(PATH_H_PBUFFER(tb->tb_path, h)),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2034) "vs-8250: S is not initialized");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2035)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2036) if (h)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2037) return dc_check_balance_internal(tb, h);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2038) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2039) return dc_check_balance_leaf(tb, h);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2040) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2041)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2042) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2043) * Check whether current node S[h] is balanced.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2044) * Calculate parameters for balancing for current level h.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2045) * Parameters:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2046) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2047) * tb tree_balance structure:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2048) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2049) * tb is a large structure that must be read about in the header
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2050) * file at the same time as this procedure if the reader is
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2051) * to successfully understand this procedure
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2052) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2053) * h current level of the node;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2054) * inum item number in S[h];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2055) * mode i - insert, p - paste, d - delete, c - cut.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2056) * Returns: 1 - schedule occurred;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2057) * 0 - balancing for higher levels needed;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2058) * -1 - no balancing for higher levels needed;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2059) * -2 - no disk space.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2060) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2061) static int check_balance(int mode,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2062) struct tree_balance *tb,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2063) int h,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2064) int inum,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2065) int pos_in_item,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2066) struct item_head *ins_ih, const void *data)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2067) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2068) struct virtual_node *vn;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2069)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2070) vn = tb->tb_vn = (struct virtual_node *)(tb->vn_buf);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2071) vn->vn_free_ptr = (char *)(tb->tb_vn + 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2072) vn->vn_mode = mode;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2073) vn->vn_affected_item_num = inum;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2074) vn->vn_pos_in_item = pos_in_item;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2075) vn->vn_ins_ih = ins_ih;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2076) vn->vn_data = data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2077)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2078) RFALSE(mode == M_INSERT && !vn->vn_ins_ih,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2079) "vs-8255: ins_ih can not be 0 in insert mode");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2080)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2081) /* Calculate balance parameters when size of node is increasing. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2082) if (tb->insert_size[h] > 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2083) return ip_check_balance(tb, h);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2084)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2085) /* Calculate balance parameters when size of node is decreasing. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2086) return dc_check_balance(tb, h);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2087) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2088)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2089) /* Check whether parent at the path is the really parent of the current node.*/
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2090) static int get_direct_parent(struct tree_balance *tb, int h)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2091) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2092) struct buffer_head *bh;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2093) struct treepath *path = tb->tb_path;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2094) int position,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2095) path_offset = PATH_H_PATH_OFFSET(tb->tb_path, h);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2096)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2097) /* We are in the root or in the new root. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2098) if (path_offset <= FIRST_PATH_ELEMENT_OFFSET) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2099)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2100) RFALSE(path_offset < FIRST_PATH_ELEMENT_OFFSET - 1,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2101) "PAP-8260: invalid offset in the path");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2102)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2103) if (PATH_OFFSET_PBUFFER(path, FIRST_PATH_ELEMENT_OFFSET)->
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2104) b_blocknr == SB_ROOT_BLOCK(tb->tb_sb)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2105) /* Root is not changed. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2106) PATH_OFFSET_PBUFFER(path, path_offset - 1) = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2107) PATH_OFFSET_POSITION(path, path_offset - 1) = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2108) return CARRY_ON;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2109) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2110) /* Root is changed and we must recalculate the path. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2111) return REPEAT_SEARCH;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2112) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2113)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2114) /* Parent in the path is not in the tree. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2115) if (!B_IS_IN_TREE
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2116) (bh = PATH_OFFSET_PBUFFER(path, path_offset - 1)))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2117) return REPEAT_SEARCH;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2118)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2119) if ((position =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2120) PATH_OFFSET_POSITION(path,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2121) path_offset - 1)) > B_NR_ITEMS(bh))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2122) return REPEAT_SEARCH;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2123)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2124) /* Parent in the path is not parent of the current node in the tree. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2125) if (B_N_CHILD_NUM(bh, position) !=
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2126) PATH_OFFSET_PBUFFER(path, path_offset)->b_blocknr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2127) return REPEAT_SEARCH;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2128)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2129) if (buffer_locked(bh)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2130) int depth = reiserfs_write_unlock_nested(tb->tb_sb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2131) __wait_on_buffer(bh);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2132) reiserfs_write_lock_nested(tb->tb_sb, depth);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2133) if (FILESYSTEM_CHANGED_TB(tb))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2134) return REPEAT_SEARCH;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2135) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2136)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2137) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2138) * Parent in the path is unlocked and really parent
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2139) * of the current node.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2140) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2141) return CARRY_ON;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2142) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2143)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2144) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2145) * Using lnum[h] and rnum[h] we should determine what neighbors
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2146) * of S[h] we
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2147) * need in order to balance S[h], and get them if necessary.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2148) * Returns: SCHEDULE_OCCURRED - schedule occurred while the function worked;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2149) * CARRY_ON - schedule didn't occur while the function worked;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2150) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2151) static int get_neighbors(struct tree_balance *tb, int h)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2152) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2153) int child_position,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2154) path_offset = PATH_H_PATH_OFFSET(tb->tb_path, h + 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2155) unsigned long son_number;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2156) struct super_block *sb = tb->tb_sb;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2157) struct buffer_head *bh;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2158) int depth;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2159)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2160) PROC_INFO_INC(sb, get_neighbors[h]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2161)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2162) if (tb->lnum[h]) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2163) /* We need left neighbor to balance S[h]. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2164) PROC_INFO_INC(sb, need_l_neighbor[h]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2165) bh = PATH_OFFSET_PBUFFER(tb->tb_path, path_offset);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2166)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2167) RFALSE(bh == tb->FL[h] &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2168) !PATH_OFFSET_POSITION(tb->tb_path, path_offset),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2169) "PAP-8270: invalid position in the parent");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2170)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2171) child_position =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2172) (bh ==
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2173) tb->FL[h]) ? tb->lkey[h] : B_NR_ITEMS(tb->
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2174) FL[h]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2175) son_number = B_N_CHILD_NUM(tb->FL[h], child_position);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2176) depth = reiserfs_write_unlock_nested(tb->tb_sb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2177) bh = sb_bread(sb, son_number);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2178) reiserfs_write_lock_nested(tb->tb_sb, depth);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2179) if (!bh)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2180) return IO_ERROR;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2181) if (FILESYSTEM_CHANGED_TB(tb)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2182) brelse(bh);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2183) PROC_INFO_INC(sb, get_neighbors_restart[h]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2184) return REPEAT_SEARCH;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2185) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2186)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2187) RFALSE(!B_IS_IN_TREE(tb->FL[h]) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2188) child_position > B_NR_ITEMS(tb->FL[h]) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2189) B_N_CHILD_NUM(tb->FL[h], child_position) !=
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2190) bh->b_blocknr, "PAP-8275: invalid parent");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2191) RFALSE(!B_IS_IN_TREE(bh), "PAP-8280: invalid child");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2192) RFALSE(!h &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2193) B_FREE_SPACE(bh) !=
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2194) MAX_CHILD_SIZE(bh) -
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2195) dc_size(B_N_CHILD(tb->FL[0], child_position)),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2196) "PAP-8290: invalid child size of left neighbor");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2197)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2198) brelse(tb->L[h]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2199) tb->L[h] = bh;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2200) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2201)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2202) /* We need right neighbor to balance S[path_offset]. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2203) if (tb->rnum[h]) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2204) PROC_INFO_INC(sb, need_r_neighbor[h]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2205) bh = PATH_OFFSET_PBUFFER(tb->tb_path, path_offset);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2206)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2207) RFALSE(bh == tb->FR[h] &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2208) PATH_OFFSET_POSITION(tb->tb_path,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2209) path_offset) >=
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2210) B_NR_ITEMS(bh),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2211) "PAP-8295: invalid position in the parent");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2212)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2213) child_position =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2214) (bh == tb->FR[h]) ? tb->rkey[h] + 1 : 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2215) son_number = B_N_CHILD_NUM(tb->FR[h], child_position);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2216) depth = reiserfs_write_unlock_nested(tb->tb_sb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2217) bh = sb_bread(sb, son_number);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2218) reiserfs_write_lock_nested(tb->tb_sb, depth);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2219) if (!bh)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2220) return IO_ERROR;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2221) if (FILESYSTEM_CHANGED_TB(tb)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2222) brelse(bh);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2223) PROC_INFO_INC(sb, get_neighbors_restart[h]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2224) return REPEAT_SEARCH;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2225) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2226) brelse(tb->R[h]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2227) tb->R[h] = bh;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2228)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2229) RFALSE(!h
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2230) && B_FREE_SPACE(bh) !=
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2231) MAX_CHILD_SIZE(bh) -
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2232) dc_size(B_N_CHILD(tb->FR[0], child_position)),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2233) "PAP-8300: invalid child size of right neighbor (%d != %d - %d)",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2234) B_FREE_SPACE(bh), MAX_CHILD_SIZE(bh),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2235) dc_size(B_N_CHILD(tb->FR[0], child_position)));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2236)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2237) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2238) return CARRY_ON;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2239) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2240)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2241) static int get_virtual_node_size(struct super_block *sb, struct buffer_head *bh)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2242) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2243) int max_num_of_items;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2244) int max_num_of_entries;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2245) unsigned long blocksize = sb->s_blocksize;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2246)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2247) #define MIN_NAME_LEN 1
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2248)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2249) max_num_of_items = (blocksize - BLKH_SIZE) / (IH_SIZE + MIN_ITEM_LEN);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2250) max_num_of_entries = (blocksize - BLKH_SIZE - IH_SIZE) /
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2251) (DEH_SIZE + MIN_NAME_LEN);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2252)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2253) return sizeof(struct virtual_node) +
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2254) max(max_num_of_items * sizeof(struct virtual_item),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2255) sizeof(struct virtual_item) + sizeof(struct direntry_uarea) +
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2256) (max_num_of_entries - 1) * sizeof(__u16));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2257) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2258)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2259) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2260) * maybe we should fail balancing we are going to perform when kmalloc
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2261) * fails several times. But now it will loop until kmalloc gets
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2262) * required memory
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2263) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2264) static int get_mem_for_virtual_node(struct tree_balance *tb)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2265) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2266) int check_fs = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2267) int size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2268) char *buf;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2269)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2270) size = get_virtual_node_size(tb->tb_sb, PATH_PLAST_BUFFER(tb->tb_path));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2271)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2272) /* we have to allocate more memory for virtual node */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2273) if (size > tb->vn_buf_size) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2274) if (tb->vn_buf) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2275) /* free memory allocated before */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2276) kfree(tb->vn_buf);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2277) /* this is not needed if kfree is atomic */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2278) check_fs = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2279) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2280)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2281) /* virtual node requires now more memory */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2282) tb->vn_buf_size = size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2283)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2284) /* get memory for virtual item */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2285) buf = kmalloc(size, GFP_ATOMIC | __GFP_NOWARN);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2286) if (!buf) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2287) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2288) * getting memory with GFP_KERNEL priority may involve
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2289) * balancing now (due to indirect_to_direct conversion
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2290) * on dcache shrinking). So, release path and collected
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2291) * resources here
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2292) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2293) free_buffers_in_tb(tb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2294) buf = kmalloc(size, GFP_NOFS);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2295) if (!buf) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2296) tb->vn_buf_size = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2297) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2298) tb->vn_buf = buf;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2299) schedule();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2300) return REPEAT_SEARCH;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2301) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2302)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2303) tb->vn_buf = buf;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2304) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2305)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2306) if (check_fs && FILESYSTEM_CHANGED_TB(tb))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2307) return REPEAT_SEARCH;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2308)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2309) return CARRY_ON;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2310) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2311)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2312) #ifdef CONFIG_REISERFS_CHECK
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2313) static void tb_buffer_sanity_check(struct super_block *sb,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2314) struct buffer_head *bh,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2315) const char *descr, int level)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2316) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2317) if (bh) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2318) if (atomic_read(&(bh->b_count)) <= 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2319)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2320) reiserfs_panic(sb, "jmacd-1", "negative or zero "
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2321) "reference counter for buffer %s[%d] "
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2322) "(%b)", descr, level, bh);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2323)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2324) if (!buffer_uptodate(bh))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2325) reiserfs_panic(sb, "jmacd-2", "buffer is not up "
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2326) "to date %s[%d] (%b)",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2327) descr, level, bh);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2328)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2329) if (!B_IS_IN_TREE(bh))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2330) reiserfs_panic(sb, "jmacd-3", "buffer is not "
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2331) "in tree %s[%d] (%b)",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2332) descr, level, bh);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2333)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2334) if (bh->b_bdev != sb->s_bdev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2335) reiserfs_panic(sb, "jmacd-4", "buffer has wrong "
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2336) "device %s[%d] (%b)",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2337) descr, level, bh);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2338)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2339) if (bh->b_size != sb->s_blocksize)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2340) reiserfs_panic(sb, "jmacd-5", "buffer has wrong "
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2341) "blocksize %s[%d] (%b)",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2342) descr, level, bh);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2343)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2344) if (bh->b_blocknr > SB_BLOCK_COUNT(sb))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2345) reiserfs_panic(sb, "jmacd-6", "buffer block "
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2346) "number too high %s[%d] (%b)",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2347) descr, level, bh);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2348) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2349) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2350) #else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2351) static void tb_buffer_sanity_check(struct super_block *sb,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2352) struct buffer_head *bh,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2353) const char *descr, int level)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2354) {;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2355) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2356) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2357)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2358) static int clear_all_dirty_bits(struct super_block *s, struct buffer_head *bh)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2359) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2360) return reiserfs_prepare_for_journal(s, bh, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2361) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2362)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2363) static int wait_tb_buffers_until_unlocked(struct tree_balance *tb)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2364) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2365) struct buffer_head *locked;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2366) #ifdef CONFIG_REISERFS_CHECK
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2367) int repeat_counter = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2368) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2369) int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2370)
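	/*
	 * Walk every buffer involved in this balance: the nodes on the
	 * search path, the neighbors and parents gathered for each level,
	 * and the preallocated FEB nodes.  Each one is journal-prepared
	 * without waiting; if a buffer turns out to be locked, drop the
	 * write lock, wait for it and rescan from the beginning.
	 */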
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2371) do {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2372)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2373) locked = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2374)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2375) for (i = tb->tb_path->path_length;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2376) !locked && i > ILLEGAL_PATH_ELEMENT_OFFSET; i--) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2377) if (PATH_OFFSET_PBUFFER(tb->tb_path, i)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2378) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2379) * if I understand correctly, we can only
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2380) * be sure the last buffer in the path is
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2381) * in the tree --clm
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2382) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2383) #ifdef CONFIG_REISERFS_CHECK
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2384) if (PATH_PLAST_BUFFER(tb->tb_path) ==
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2385) PATH_OFFSET_PBUFFER(tb->tb_path, i))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2386) tb_buffer_sanity_check(tb->tb_sb,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2387) PATH_OFFSET_PBUFFER
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2388) (tb->tb_path,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2389) i), "S",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2390) tb->tb_path->
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2391) path_length - i);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2392) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2393) if (!clear_all_dirty_bits(tb->tb_sb,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2394) PATH_OFFSET_PBUFFER
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2395) (tb->tb_path,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2396) i))) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2397) locked =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2398) PATH_OFFSET_PBUFFER(tb->tb_path,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2399) i);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2400) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2401) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2402) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2403)
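		/*
		 * For every level that still has work to do (nonzero
		 * insert_size), check the buffers gathered by get_neighbors():
		 * L[i]/R[i] are the left/right neighbors of S[i], FL[i]/FR[i]
		 * their parents, and CFL[i]/CFR[i] the common parents holding
		 * the delimiting keys.
		 */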
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2404) for (i = 0; !locked && i < MAX_HEIGHT && tb->insert_size[i];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2405) i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2406)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2407) if (tb->lnum[i]) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2408)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2409) if (tb->L[i]) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2410) tb_buffer_sanity_check(tb->tb_sb,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2411) tb->L[i],
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2412) "L", i);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2413) if (!clear_all_dirty_bits
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2414) (tb->tb_sb, tb->L[i]))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2415) locked = tb->L[i];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2416) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2417)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2418) if (!locked && tb->FL[i]) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2419) tb_buffer_sanity_check(tb->tb_sb,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2420) tb->FL[i],
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2421) "FL", i);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2422) if (!clear_all_dirty_bits
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2423) (tb->tb_sb, tb->FL[i]))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2424) locked = tb->FL[i];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2425) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2426)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2427) if (!locked && tb->CFL[i]) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2428) tb_buffer_sanity_check(tb->tb_sb,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2429) tb->CFL[i],
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2430) "CFL", i);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2431) if (!clear_all_dirty_bits
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2432) (tb->tb_sb, tb->CFL[i]))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2433) locked = tb->CFL[i];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2434) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2435)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2436) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2437)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2438) if (!locked && (tb->rnum[i])) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2439)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2440) if (tb->R[i]) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2441) tb_buffer_sanity_check(tb->tb_sb,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2442) tb->R[i],
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2443) "R", i);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2444) if (!clear_all_dirty_bits
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2445) (tb->tb_sb, tb->R[i]))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2446) locked = tb->R[i];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2447) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2448)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2449) if (!locked && tb->FR[i]) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2450) tb_buffer_sanity_check(tb->tb_sb,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2451) tb->FR[i],
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2452) "FR", i);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2453) if (!clear_all_dirty_bits
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2454) (tb->tb_sb, tb->FR[i]))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2455) locked = tb->FR[i];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2456) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2457)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2458) if (!locked && tb->CFR[i]) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2459) tb_buffer_sanity_check(tb->tb_sb,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2460) tb->CFR[i],
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2461) "CFR", i);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2462) if (!clear_all_dirty_bits
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2463) (tb->tb_sb, tb->CFR[i]))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2464) locked = tb->CFR[i];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2465) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2466) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2467) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2468)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2469) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2470) * as far as I can tell, this is not required. The FEB list
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2471) * seems to be full of newly allocated nodes, which will
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2472) * never be locked, dirty, or anything else.
		 * To be safe, I'm putting the checks and waits in.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2474) * For the moment, they are needed to keep the code in
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2475) * journal.c from complaining about the buffer.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2476) * That code is inside CONFIG_REISERFS_CHECK as well. --clm
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2477) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2478) for (i = 0; !locked && i < MAX_FEB_SIZE; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2479) if (tb->FEB[i]) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2480) if (!clear_all_dirty_bits
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2481) (tb->tb_sb, tb->FEB[i]))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2482) locked = tb->FEB[i];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2483) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2484) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2485)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2486) if (locked) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2487) int depth;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2488) #ifdef CONFIG_REISERFS_CHECK
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2489) repeat_counter++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2490) if ((repeat_counter % 10000) == 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2491) reiserfs_warning(tb->tb_sb, "reiserfs-8200",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2492) "too many iterations waiting "
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2493) "for buffer to unlock "
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2494) "(%b)", locked);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2495)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2496) /* Don't loop forever. Try to recover from possible error. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2497)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2498) return (FILESYSTEM_CHANGED_TB(tb)) ?
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2499) REPEAT_SEARCH : CARRY_ON;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2500) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2501) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2502) depth = reiserfs_write_unlock_nested(tb->tb_sb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2503) __wait_on_buffer(locked);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2504) reiserfs_write_lock_nested(tb->tb_sb, depth);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2505) if (FILESYSTEM_CHANGED_TB(tb))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2506) return REPEAT_SEARCH;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2507) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2508)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2509) } while (locked);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2510)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2511) return CARRY_ON;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2512) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2513)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2514) /*
 * Prepare for balancing, that is:
 * get all necessary parents and neighbors;
 * analyze what should be moved and where;
 * get a sufficient number of new nodes.
 * Balancing will start only after all needed resources have been collected.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2520) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2521) * When ported to SMP kernels, only at the last moment after all needed nodes
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2522) * are collected in cache, will the resources be locked using the usual
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2523) * textbook ordered lock acquisition algorithms. Note that ensuring that
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2524) * this code neither write locks what it does not need to write lock nor locks
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2525) * out of order will be a pain in the butt that could have been avoided.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2526) * Grumble grumble. -Hans
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2527) *
 * "fix" is meant in the sense of "render unchanging"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2529) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2530) * Latency might be improved by first gathering a list of what buffers
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2531) * are needed and then getting as many of them in parallel as possible? -Hans
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2532) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2533) * Parameters:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2534) * op_mode i - insert, d - delete, c - cut (truncate), p - paste (append)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2535) * tb tree_balance structure;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2536) * inum item number in S[h];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2537) * pos_in_item - comment this if you can
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2538) * ins_ih item head of item being inserted
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2539) * data inserted item or data to be pasted
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2540) * Returns: 1 - schedule occurred while the function worked;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2541) * 0 - schedule didn't occur while the function worked;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2542) * -1 - if no_disk_space
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2543) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2544)
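/*
 * A rough sketch of how callers typically drive this function (compare
 * the item operations in stree.c); research_item() below is only a
 * placeholder for whatever search re-establishes tb->tb_path, it is not
 * a function defined in this file:
 *
 *	while ((ret = fix_nodes(M_INSERT, &tb, ih, body)) == REPEAT_SEARCH)
 *		research_item(&tb);	<- tree changed, find S[0] again
 *
 *	if (ret == CARRY_ON)
 *		do_balance(&tb, ih, body, M_INSERT);
 *	else
 *		unfix_nodes(&tb);
 */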
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2545) int fix_nodes(int op_mode, struct tree_balance *tb,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2546) struct item_head *ins_ih, const void *data)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2547) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2548) int ret, h, item_num = PATH_LAST_POSITION(tb->tb_path);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2549) int pos_in_item;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2550)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2551) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2552) * we set wait_tb_buffers_run when we have to restore any dirty
	 * bits cleared during wait_tb_buffers_until_unlocked()
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2554) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2555) int wait_tb_buffers_run = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2556) struct buffer_head *tbS0 = PATH_PLAST_BUFFER(tb->tb_path);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2557)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2558) ++REISERFS_SB(tb->tb_sb)->s_fix_nodes;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2559)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2560) pos_in_item = tb->tb_path->pos_in_item;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2561)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2562) tb->fs_gen = get_generation(tb->tb_sb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2563)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2564) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2565) * we prepare and log the super here so it will already be in the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2566) * transaction when do_balance needs to change it.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2567) * This way do_balance won't have to schedule when trying to prepare
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2568) * the super for logging
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2569) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2570) reiserfs_prepare_for_journal(tb->tb_sb,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2571) SB_BUFFER_WITH_SB(tb->tb_sb), 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2572) journal_mark_dirty(tb->transaction_handle,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2573) SB_BUFFER_WITH_SB(tb->tb_sb));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2574) if (FILESYSTEM_CHANGED_TB(tb))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2575) return REPEAT_SEARCH;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2576)
	/* tbS0 may be locked, e.g. during indirect_to_direct conversion */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2578) if (buffer_locked(tbS0)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2579) int depth = reiserfs_write_unlock_nested(tb->tb_sb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2580) __wait_on_buffer(tbS0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2581) reiserfs_write_lock_nested(tb->tb_sb, depth);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2582) if (FILESYSTEM_CHANGED_TB(tb))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2583) return REPEAT_SEARCH;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2584) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2585) #ifdef CONFIG_REISERFS_CHECK
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2586) if (REISERFS_SB(tb->tb_sb)->cur_tb) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2587) print_cur_tb("fix_nodes");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2588) reiserfs_panic(tb->tb_sb, "PAP-8305",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2589) "there is pending do_balance");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2590) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2591)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2592) if (!buffer_uptodate(tbS0) || !B_IS_IN_TREE(tbS0))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2593) reiserfs_panic(tb->tb_sb, "PAP-8320", "S[0] (%b %z) is "
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2594) "not uptodate at the beginning of fix_nodes "
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2595) "or not in tree (mode %c)",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2596) tbS0, tbS0, op_mode);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2597)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2598) /* Check parameters. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2599) switch (op_mode) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2600) case M_INSERT:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2601) if (item_num <= 0 || item_num > B_NR_ITEMS(tbS0))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2602) reiserfs_panic(tb->tb_sb, "PAP-8330", "Incorrect "
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2603) "item number %d (in S0 - %d) in case "
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2604) "of insert", item_num,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2605) B_NR_ITEMS(tbS0));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2606) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2607) case M_PASTE:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2608) case M_DELETE:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2609) case M_CUT:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2610) if (item_num < 0 || item_num >= B_NR_ITEMS(tbS0)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2611) print_block(tbS0, 0, -1, -1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2612) reiserfs_panic(tb->tb_sb, "PAP-8335", "Incorrect "
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2613) "item number(%d); mode = %c "
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2614) "insert_size = %d",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2615) item_num, op_mode,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2616) tb->insert_size[0]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2617) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2618) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2619) default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2620) reiserfs_panic(tb->tb_sb, "PAP-8340", "Incorrect mode "
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2621) "of operation");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2622) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2623) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2624)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2625) if (get_mem_for_virtual_node(tb) == REPEAT_SEARCH)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2626) /* FIXME: maybe -ENOMEM when tb->vn_buf == 0? Now just repeat */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2627) return REPEAT_SEARCH;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2628)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2629) /* Starting from the leaf level; for all levels h of the tree. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2630) for (h = 0; h < MAX_HEIGHT && tb->insert_size[h]; h++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2631) ret = get_direct_parent(tb, h);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2632) if (ret != CARRY_ON)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2633) goto repeat;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2634)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2635) ret = check_balance(op_mode, tb, h, item_num,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2636) pos_in_item, ins_ih, data);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2637) if (ret != CARRY_ON) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2638) if (ret == NO_BALANCING_NEEDED) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2639) /* No balancing for higher levels needed. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2640) ret = get_neighbors(tb, h);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2641) if (ret != CARRY_ON)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2642) goto repeat;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2643) if (h != MAX_HEIGHT - 1)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2644) tb->insert_size[h + 1] = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2645) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2646) * ok, analysis and resource gathering
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2647) * are complete
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2648) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2649) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2650) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2651) goto repeat;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2652) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2653)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2654) ret = get_neighbors(tb, h);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2655) if (ret != CARRY_ON)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2656) goto repeat;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2657)
		/*
		 * get_empty_nodes may fail: either there is no free disk
		 * space, or a schedule occurred and the analysis above may
		 * be invalid and needs to be redone.
		 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2662) ret = get_empty_nodes(tb, h);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2663) if (ret != CARRY_ON)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2664) goto repeat;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2665)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2666) /*
		 * We have a positive insert size but no nodes exist on this
		 * level; this means that we are creating a new root.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2669) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2670) if (!PATH_H_PBUFFER(tb->tb_path, h)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2671)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2672) RFALSE(tb->blknum[h] != 1,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2673) "PAP-8350: creating new empty root");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2674)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2675) if (h < MAX_HEIGHT - 1)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2676) tb->insert_size[h + 1] = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2677) } else if (!PATH_H_PBUFFER(tb->tb_path, h + 1)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2678) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2679) * The tree needs to be grown, so this node S[h]
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2680) * which is the root node is split into two nodes,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2681) * and a new node (S[h+1]) will be created to
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2682) * become the root node.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2683) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2684) if (tb->blknum[h] > 1) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2685)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2686) RFALSE(h == MAX_HEIGHT - 1,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2687) "PAP-8355: attempt to create too high of a tree");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2688)
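				/*
				 * the new root will hold blknum[h] disk_child
				 * pointers and blknum[h] - 1 delimiting keys,
				 * which is the insert size computed below
				 */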
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2689) tb->insert_size[h + 1] =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2690) (DC_SIZE +
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2691) KEY_SIZE) * (tb->blknum[h] - 1) +
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2692) DC_SIZE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2693) } else if (h < MAX_HEIGHT - 1)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2694) tb->insert_size[h + 1] = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2695) } else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2696) tb->insert_size[h + 1] =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2697) (DC_SIZE + KEY_SIZE) * (tb->blknum[h] - 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2698) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2699)
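	/*
	 * All per-level analysis is done and every buffer this balance
	 * needs is in the cache; journal-prepare them all and wait for any
	 * that are still locked before do_balance is allowed to run.
	 */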
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2700) ret = wait_tb_buffers_until_unlocked(tb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2701) if (ret == CARRY_ON) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2702) if (FILESYSTEM_CHANGED_TB(tb)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2703) wait_tb_buffers_run = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2704) ret = REPEAT_SEARCH;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2705) goto repeat;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2706) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2707) return CARRY_ON;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2708) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2709) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2710) wait_tb_buffers_run = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2711) goto repeat;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2712) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2713)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2714) repeat:
	/*
	 * fix_nodes was unable to perform its calculation because the
	 * filesystem changed under us, free disk space ran out, or an i/o
	 * failure occurred.  In the first case the search will be
	 * repeated.  For now, free all resources acquired so far except
	 * for the newly allocated nodes.
	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2722) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2723) int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2724)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2725) /* Release path buffers. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2726) if (wait_tb_buffers_run) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2727) pathrelse_and_restore(tb->tb_sb, tb->tb_path);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2728) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2729) pathrelse(tb->tb_path);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2730) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2731) /* brelse all resources collected for balancing */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2732) for (i = 0; i < MAX_HEIGHT; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2733) if (wait_tb_buffers_run) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2734) reiserfs_restore_prepared_buffer(tb->tb_sb,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2735) tb->L[i]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2736) reiserfs_restore_prepared_buffer(tb->tb_sb,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2737) tb->R[i]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2738) reiserfs_restore_prepared_buffer(tb->tb_sb,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2739) tb->FL[i]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2740) reiserfs_restore_prepared_buffer(tb->tb_sb,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2741) tb->FR[i]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2742) reiserfs_restore_prepared_buffer(tb->tb_sb,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2743) tb->
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2744) CFL[i]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2745) reiserfs_restore_prepared_buffer(tb->tb_sb,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2746) tb->
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2747) CFR[i]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2748) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2749)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2750) brelse(tb->L[i]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2751) brelse(tb->R[i]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2752) brelse(tb->FL[i]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2753) brelse(tb->FR[i]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2754) brelse(tb->CFL[i]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2755) brelse(tb->CFR[i]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2756)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2757) tb->L[i] = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2758) tb->R[i] = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2759) tb->FL[i] = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2760) tb->FR[i] = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2761) tb->CFL[i] = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2762) tb->CFR[i] = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2763) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2764)
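		/*
		 * The preallocated FEB nodes are kept: they can be reused if
		 * the caller repeats the search and calls fix_nodes again.
		 * Only their journal-prepared state is undone here; the blocks
		 * themselves are given back by unfix_nodes().
		 */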
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2765) if (wait_tb_buffers_run) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2766) for (i = 0; i < MAX_FEB_SIZE; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2767) if (tb->FEB[i])
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2768) reiserfs_restore_prepared_buffer
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2769) (tb->tb_sb, tb->FEB[i]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2770) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2771) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2772) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2773) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2774)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2775) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2776)
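/*
 * Undo the work of fix_nodes(): restore the journal-prepared state of the
 * buffers grabbed for this balance, drop the extra references on the path,
 * neighbors and parents, give back any preallocated blocks that balancing
 * did not use, and free the virtual node buffer.
 */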
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2777) void unfix_nodes(struct tree_balance *tb)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2778) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2779) int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2780)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2781) /* Release path buffers. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2782) pathrelse_and_restore(tb->tb_sb, tb->tb_path);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2783)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2784) /* brelse all resources collected for balancing */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2785) for (i = 0; i < MAX_HEIGHT; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2786) reiserfs_restore_prepared_buffer(tb->tb_sb, tb->L[i]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2787) reiserfs_restore_prepared_buffer(tb->tb_sb, tb->R[i]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2788) reiserfs_restore_prepared_buffer(tb->tb_sb, tb->FL[i]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2789) reiserfs_restore_prepared_buffer(tb->tb_sb, tb->FR[i]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2790) reiserfs_restore_prepared_buffer(tb->tb_sb, tb->CFL[i]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2791) reiserfs_restore_prepared_buffer(tb->tb_sb, tb->CFR[i]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2792)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2793) brelse(tb->L[i]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2794) brelse(tb->R[i]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2795) brelse(tb->FL[i]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2796) brelse(tb->FR[i]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2797) brelse(tb->CFL[i]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2798) brelse(tb->CFR[i]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2799) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2800)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2801) /* deal with list of allocated (used and unused) nodes */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2802) for (i = 0; i < MAX_FEB_SIZE; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2803) if (tb->FEB[i]) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2804) b_blocknr_t blocknr = tb->FEB[i]->b_blocknr;
			/*
			 * de-allocate the block, which was not used by
			 * balancing, and release the buffer for it
			 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2809) brelse(tb->FEB[i]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2810) reiserfs_free_block(tb->transaction_handle, NULL,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2811) blocknr, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2812) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2813) if (tb->used[i]) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2814) /* release used as new nodes including a new root */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2815) brelse(tb->used[i]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2816) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2817) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2818)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2819) kfree(tb->vn_buf);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2820)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2821) }