/*
 * Copyright 2000 by Hans Reiser, licensing governed by reiserfs/README
 */

/*
 * Now we have all buffers that must be used in balancing of the tree.
 * Further calculations cannot cause schedule(), and thus the buffer
 * tree will be stable until the balancing is finished.
 * Balance the tree according to the analysis made before,
 * using the buffers obtained above.
 */

#include <linux/uaccess.h>
#include <linux/time.h>
#include "reiserfs.h"
#include <linux/buffer_head.h>
#include <linux/kernel.h>

static inline void buffer_info_init_left(struct tree_balance *tb,
					 struct buffer_info *bi)
{
	bi->tb = tb;
	bi->bi_bh = tb->L[0];
	bi->bi_parent = tb->FL[0];
	bi->bi_position = get_left_neighbor_position(tb, 0);
}

static inline void buffer_info_init_right(struct tree_balance *tb,
					  struct buffer_info *bi)
{
	bi->tb = tb;
	bi->bi_bh = tb->R[0];
	bi->bi_parent = tb->FR[0];
	bi->bi_position = get_right_neighbor_position(tb, 0);
}

static inline void buffer_info_init_tbS0(struct tree_balance *tb,
					 struct buffer_info *bi)
{
	bi->tb = tb;
	bi->bi_bh = PATH_PLAST_BUFFER(tb->tb_path);
	bi->bi_parent = PATH_H_PPARENT(tb->tb_path, 0);
	bi->bi_position = PATH_H_POSITION(tb->tb_path, 1);
}

static inline void buffer_info_init_bh(struct tree_balance *tb,
				       struct buffer_info *bi,
				       struct buffer_head *bh)
{
	bi->tb = tb;
	bi->bi_bh = bh;
	bi->bi_parent = NULL;
	bi->bi_position = 0;
}
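
/*
 * A buffer_info describes the leaf being modified together with its
 * parent buffer and the position of the child pointer in that parent,
 * so the leaf_* helpers can keep the parent's child-size bookkeeping
 * consistent.  buffer_info_init_bh() is the variant used for a node
 * that has no parent pointer to maintain yet (such as a freshly
 * allocated node).
 */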

inline void do_balance_mark_leaf_dirty(struct tree_balance *tb,
				       struct buffer_head *bh, int flag)
{
	journal_mark_dirty(tb->transaction_handle, bh);
}

#define do_balance_mark_internal_dirty do_balance_mark_leaf_dirty
#define do_balance_mark_sb_dirty do_balance_mark_leaf_dirty

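/*
 * The three dirty-marking helpers are aliases: with journaling, leaf,
 * internal and super blocks are all made dirty the same way, through
 * journal_mark_dirty() on the current transaction.  The flag argument
 * is not used by this implementation.
 */
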
/*
 * summary:
 *  if deleting something ( tb->insert_size[0] < 0 )
 *	return(balance_leaf_when_delete()); (flag d handled here)
 *  else
 *	if lnum is larger than 0 we put items into the left node
 *	if rnum is larger than 0 we put items into the right node
 *	if snum1 is larger than 0 we put items into the new node s1
 *	if snum2 is larger than 0 we put items into the new node s2
 * Note that all *num* values count new items being created.
 */

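/*
 * Example (M_DELETE): removing the only remaining item of S[0] when
 * fix_nodes() decided lnum[0] == -1 and rnum[0] == 0 ends up in
 * balance_leaf_when_delete_left(), which shifts everything from S[0]
 * into L[0] and frees S[0].
 */
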
static void balance_leaf_when_delete_del(struct tree_balance *tb)
{
	struct buffer_head *tbS0 = PATH_PLAST_BUFFER(tb->tb_path);
	int item_pos = PATH_LAST_POSITION(tb->tb_path);
	struct buffer_info bi;
#ifdef CONFIG_REISERFS_CHECK
	struct item_head *ih = item_head(tbS0, item_pos);
#endif

	RFALSE(ih_item_len(ih) + IH_SIZE != -tb->insert_size[0],
	       "vs-12013: mode Delete, insert size %d, ih to be deleted %h",
	       -tb->insert_size[0], ih);

	buffer_info_init_tbS0(tb, &bi);
	leaf_delete_items(&bi, 0, item_pos, 1, -1);

	if (!item_pos && tb->CFL[0]) {
		if (B_NR_ITEMS(tbS0)) {
			replace_key(tb, tb->CFL[0], tb->lkey[0], tbS0, 0);
		} else {
			if (!PATH_H_POSITION(tb->tb_path, 1))
				replace_key(tb, tb->CFL[0], tb->lkey[0],
					    PATH_H_PPARENT(tb->tb_path, 0), 0);
		}
	}

	RFALSE(!item_pos && !tb->CFL[0],
	       "PAP-12020: tb->CFL[0]==%p, tb->L[0]==%p", tb->CFL[0],
	       tb->L[0]);
}

/* cut item in S[0] */
static void balance_leaf_when_delete_cut(struct tree_balance *tb)
{
	struct buffer_head *tbS0 = PATH_PLAST_BUFFER(tb->tb_path);
	int item_pos = PATH_LAST_POSITION(tb->tb_path);
	struct item_head *ih = item_head(tbS0, item_pos);
	int pos_in_item = tb->tb_path->pos_in_item;
	struct buffer_info bi;
	buffer_info_init_tbS0(tb, &bi);

	if (is_direntry_le_ih(ih)) {
		/*
		 * UFS unlink semantics are such that you can only
		 * delete one directory entry at a time.
		 *
		 * When we cut a directory, tb->insert_size[0] means
		 * the number of entries to be cut (always 1).
		 */
		tb->insert_size[0] = -1;
		leaf_cut_from_buffer(&bi, item_pos, pos_in_item,
				     -tb->insert_size[0]);

		RFALSE(!item_pos && !pos_in_item && !tb->CFL[0],
		       "PAP-12030: can not change delimiting key. CFL[0]=%p",
		       tb->CFL[0]);

		if (!item_pos && !pos_in_item && tb->CFL[0])
			replace_key(tb, tb->CFL[0], tb->lkey[0], tbS0, 0);
	} else {
		leaf_cut_from_buffer(&bi, item_pos, pos_in_item,
				     -tb->insert_size[0]);

		RFALSE(!ih_item_len(ih),
		       "PAP-12035: cut must leave non-zero dynamic "
		       "length of item");
	}
}
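
/*
 * Worked example: truncating the last 512 bytes of a direct item gives
 * tb->insert_size[0] == -512, so leaf_cut_from_buffer() removes 512
 * bytes starting at pos_in_item.  For a directory item the unit is
 * entries rather than bytes, which is why insert_size[0] is forced to
 * -1 above: exactly one entry is cut per balance.
 */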

static int balance_leaf_when_delete_left(struct tree_balance *tb)
{
	struct buffer_head *tbS0 = PATH_PLAST_BUFFER(tb->tb_path);
	int n = B_NR_ITEMS(tbS0);

	/* L[0] must be joined with S[0] */
	if (tb->lnum[0] == -1) {
		/* R[0] must be also joined with S[0] */
		if (tb->rnum[0] == -1) {
			if (tb->FR[0] == PATH_H_PPARENT(tb->tb_path, 0)) {
				/*
				 * all contents of all the
				 * 3 buffers will be in L[0]
				 */
				if (PATH_H_POSITION(tb->tb_path, 1) == 0 &&
				    1 < B_NR_ITEMS(tb->FR[0]))
					replace_key(tb, tb->CFL[0],
						    tb->lkey[0], tb->FR[0], 1);

				leaf_move_items(LEAF_FROM_S_TO_L, tb, n, -1,
						NULL);
				leaf_move_items(LEAF_FROM_R_TO_L, tb,
						B_NR_ITEMS(tb->R[0]), -1,
						NULL);

				reiserfs_invalidate_buffer(tb, tbS0);
				reiserfs_invalidate_buffer(tb, tb->R[0]);

				return 0;
			}

			/* all contents of all the 3 buffers will be in R[0] */
			leaf_move_items(LEAF_FROM_S_TO_R, tb, n, -1, NULL);
			leaf_move_items(LEAF_FROM_L_TO_R, tb,
					B_NR_ITEMS(tb->L[0]), -1, NULL);

			/* right_delimiting_key is correct in R[0] */
			replace_key(tb, tb->CFR[0], tb->rkey[0], tb->R[0], 0);

			reiserfs_invalidate_buffer(tb, tbS0);
			reiserfs_invalidate_buffer(tb, tb->L[0]);

			return -1;
		}

		RFALSE(tb->rnum[0] != 0,
		       "PAP-12045: rnum must be 0 (%d)", tb->rnum[0]);
		/* all contents of L[0] and S[0] will be in L[0] */
		leaf_shift_left(tb, n, -1);

		reiserfs_invalidate_buffer(tb, tbS0);

		return 0;
	}

	/*
	 * a part of the contents of S[0] will be in L[0] and
	 * the rest of S[0] will be in R[0]
	 */

	RFALSE((tb->lnum[0] + tb->rnum[0] < n) ||
	       (tb->lnum[0] + tb->rnum[0] > n + 1),
	       "PAP-12050: rnum(%d) and lnum(%d) and item "
	       "number(%d) in S[0] are not consistent",
	       tb->rnum[0], tb->lnum[0], n);
	RFALSE((tb->lnum[0] + tb->rnum[0] == n) &&
	       (tb->lbytes != -1 || tb->rbytes != -1),
	       "PAP-12055: bad rbytes (%d)/lbytes (%d) "
	       "parameters when items are not split",
	       tb->rbytes, tb->lbytes);
	RFALSE((tb->lnum[0] + tb->rnum[0] == n + 1) &&
	       (tb->lbytes < 1 || tb->rbytes != -1),
	       "PAP-12060: bad rbytes (%d)/lbytes (%d) "
	       "parameters when items are split",
	       tb->rbytes, tb->lbytes);

	leaf_shift_left(tb, tb->lnum[0], tb->lbytes);
	leaf_shift_right(tb, tb->rnum[0], tb->rbytes);

	reiserfs_invalidate_buffer(tb, tbS0);

	return 0;
}
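
/*
 * Three outcomes are possible above: everything merges into L[0],
 * everything merges into R[0], or S[0] is split between L[0] and R[0].
 * The return value is -1 only in the all-into-R[0] case and 0
 * otherwise, recording on which side S[0]'s former contents ended up.
 */
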
/*
 * Balance leaf node in case of delete or cut: insert_size[0] < 0
 *
 * lnum, rnum can have values >= -1
 *	-1 means that the neighbor must be joined with S
 *	 0 means that nothing should be done with the neighbor
 *	>0 means to shift entirely or partly the specified number of
 *	   items to the neighbor
 */
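/*
 * For instance, with n == 5 items in S[0], lnum[0] == 2 and
 * lbytes == 120 means one whole item plus the first 120 bytes of the
 * second item go to L[0] (for a directory item the unit is entries);
 * lbytes == -1 means both items move in whole.
 */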
static int balance_leaf_when_delete(struct tree_balance *tb, int flag)
{
	struct buffer_head *tbS0 = PATH_PLAST_BUFFER(tb->tb_path);
	struct buffer_info bi;
	int n;

	RFALSE(tb->FR[0] && B_LEVEL(tb->FR[0]) != DISK_LEAF_NODE_LEVEL + 1,
	       "vs-12000: level: wrong FR %z", tb->FR[0]);
	RFALSE(tb->blknum[0] > 1,
	       "PAP-12005: tb->blknum == %d, can not be > 1", tb->blknum[0]);
	RFALSE(!tb->blknum[0] && !PATH_H_PPARENT(tb->tb_path, 0),
	       "PAP-12010: tree can not be empty");

	buffer_info_init_tbS0(tb, &bi);

	/* Delete or truncate the item */

	BUG_ON(flag != M_DELETE && flag != M_CUT);
	if (flag == M_DELETE)
		balance_leaf_when_delete_del(tb);
	else /* M_CUT */
		balance_leaf_when_delete_cut(tb);

	/*
	 * the rule is that no shifting occurs unless by shifting
	 * a node can be freed
	 */
	n = B_NR_ITEMS(tbS0);

	/* L[0] takes part in balancing */
	if (tb->lnum[0])
		return balance_leaf_when_delete_left(tb);

	if (tb->rnum[0] == -1) {
		/* all contents of R[0] and S[0] will be in R[0] */
		leaf_shift_right(tb, n, -1);
		reiserfs_invalidate_buffer(tb, tbS0);
		return 0;
	}

	RFALSE(tb->rnum[0],
	       "PAP-12065: bad rnum parameter must be 0 (%d)", tb->rnum[0]);
	return 0;
}

static unsigned int balance_leaf_insert_left(struct tree_balance *tb,
					     struct item_head *const ih,
					     const char * const body)
{
	int ret;
	struct buffer_info bi;
	int n = B_NR_ITEMS(tb->L[0]);
	unsigned body_shift_bytes = 0;

	if (tb->item_pos == tb->lnum[0] - 1 && tb->lbytes != -1) {
		/* part of new item falls into L[0] */
		int new_item_len, shift;

		ret = leaf_shift_left(tb, tb->lnum[0] - 1, -1);

		/* Calculate item length to insert to S[0] */
		new_item_len = ih_item_len(ih) - tb->lbytes;

		/* Calculate and check item length to insert to L[0] */
		put_ih_item_len(ih, ih_item_len(ih) - new_item_len);

		RFALSE(ih_item_len(ih) <= 0,
		       "PAP-12080: there is nothing to insert into L[0]: "
		       "ih_item_len=%d", ih_item_len(ih));

		/* Insert new item into L[0] */
		buffer_info_init_left(tb, &bi);
		leaf_insert_into_buf(&bi, n + tb->item_pos - ret, ih, body,
				     min_t(int, tb->zeroes_num, ih_item_len(ih)));

		/*
		 * Calculate key component, item length and body to
		 * insert into S[0]
		 */
		shift = 0;
		if (is_indirect_le_ih(ih))
			shift = tb->tb_sb->s_blocksize_bits - UNFM_P_SHIFT;

		add_le_ih_k_offset(ih, tb->lbytes << shift);
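		/*
		 * The shift converts shifted item bytes into a key
		 * offset: an indirect item stores one 4-byte
		 * unformatted-node pointer per block, so with 4KB
		 * blocks shift is 12 - UNFM_P_SHIFT = 10 and
		 * lbytes << 10 equals (lbytes / 4) * 4096 bytes of
		 * file data.  For a direct item shift stays 0 and
		 * lbytes is already a byte count.
		 */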

		put_ih_item_len(ih, new_item_len);
		if (tb->lbytes > tb->zeroes_num) {
			body_shift_bytes = tb->lbytes - tb->zeroes_num;
			tb->zeroes_num = 0;
		} else
			tb->zeroes_num -= tb->lbytes;

		RFALSE(ih_item_len(ih) <= 0,
		       "PAP-12085: there is nothing to insert into S[0]: "
		       "ih_item_len=%d", ih_item_len(ih));
	} else {
		/* new item in whole falls into L[0] */
		/* Shift lnum[0]-1 items to L[0] */
		ret = leaf_shift_left(tb, tb->lnum[0] - 1, tb->lbytes);

		/* Insert new item into L[0] */
		buffer_info_init_left(tb, &bi);
		leaf_insert_into_buf(&bi, n + tb->item_pos - ret, ih, body,
				     tb->zeroes_num);
		tb->insert_size[0] = 0;
		tb->zeroes_num = 0;
	}
	return body_shift_bytes;
}

static void balance_leaf_paste_left_shift_dirent(struct tree_balance *tb,
						 struct item_head * const ih,
						 const char * const body)
{
	int n = B_NR_ITEMS(tb->L[0]);
	struct buffer_info bi;

	RFALSE(tb->zeroes_num,
	       "PAP-12090: invalid parameter in case of a directory");

	/* directory item */
	if (tb->lbytes > tb->pos_in_item) {
		/* new directory entry falls into L[0] */
		struct item_head *pasted;
		int ret, l_pos_in_item = tb->pos_in_item;

		/*
		 * Shift lnum[0] - 1 items in whole.
		 * Shift lbytes - 1 entries from given directory item
		 */
		ret = leaf_shift_left(tb, tb->lnum[0], tb->lbytes - 1);
		if (ret && !tb->item_pos) {
			pasted = item_head(tb->L[0], B_NR_ITEMS(tb->L[0]) - 1);
			l_pos_in_item += ih_entry_count(pasted) -
					 (tb->lbytes - 1);
		}

		/* Append given directory entry to directory item */
		buffer_info_init_left(tb, &bi);
		leaf_paste_in_buffer(&bi, n + tb->item_pos - ret,
				     l_pos_in_item, tb->insert_size[0],
				     body, tb->zeroes_num);

		/*
		 * the previous call prepared space for pasting the new
		 * entry; the following call pastes the entry itself
		 */

		/*
		 * when the directory item has been merged, pos_in_item
		 * has been adjusted too
		 */

		/* paste new directory entry. 1 is entry number */
		leaf_paste_entries(&bi, n + tb->item_pos - ret,
				   l_pos_in_item, 1,
				   (struct reiserfs_de_head *) body,
				   body + DEH_SIZE, tb->insert_size[0]);
		tb->insert_size[0] = 0;
	} else {
		/* new directory entry doesn't fall into L[0] */
		/*
		 * Shift lnum[0]-1 items in whole. Shift lbytes
		 * directory entries from directory item number lnum[0]
		 */
		leaf_shift_left(tb, tb->lnum[0], tb->lbytes);
	}

	/* Calculate new position to append in item body */
	tb->pos_in_item -= tb->lbytes;
}
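
/*
 * Note on units: for a directory item, lbytes and pos_in_item count
 * directory entries, not bytes.  So the test "lbytes > pos_in_item"
 * above asks whether the entry being pasted lands among the entries
 * that are moving into L[0].
 */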

static unsigned int balance_leaf_paste_left_shift(struct tree_balance *tb,
						  struct item_head * const ih,
						  const char * const body)
{
	struct buffer_head *tbS0 = PATH_PLAST_BUFFER(tb->tb_path);
	int n = B_NR_ITEMS(tb->L[0]);
	struct buffer_info bi;
	int body_shift_bytes = 0;

	if (is_direntry_le_ih(item_head(tbS0, tb->item_pos))) {
		balance_leaf_paste_left_shift_dirent(tb, ih, body);
		return 0;
	}

	RFALSE(tb->lbytes <= 0,
	       "PAP-12095: there is nothing to shift to L[0]. "
	       "lbytes=%d", tb->lbytes);
	RFALSE(tb->pos_in_item != ih_item_len(item_head(tbS0, tb->item_pos)),
	       "PAP-12100: incorrect position to paste: "
	       "item_len=%d, pos_in_item=%d",
	       ih_item_len(item_head(tbS0, tb->item_pos)), tb->pos_in_item);

	/* appended item will be in L[0] in whole */
	if (tb->lbytes >= tb->pos_in_item) {
		struct item_head *tbS0_pos_ih, *tbL0_ih;
		struct item_head *tbS0_0_ih;
		struct reiserfs_key *left_delim_key;
		int ret, l_n, version, temp_l;

		tbS0_pos_ih = item_head(tbS0, tb->item_pos);
		tbS0_0_ih = item_head(tbS0, 0);

		/*
		 * this number of bytes must be appended
		 * to the last item of L[0]
		 */
		l_n = tb->lbytes - tb->pos_in_item;

		/* Calculate new insert_size[0] */
		tb->insert_size[0] -= l_n;

		RFALSE(tb->insert_size[0] <= 0,
		       "PAP-12105: there is nothing to paste into "
		       "L[0]. insert_size=%d", tb->insert_size[0]);

		ret = leaf_shift_left(tb, tb->lnum[0],
				      ih_item_len(tbS0_pos_ih));

		tbL0_ih = item_head(tb->L[0], n + tb->item_pos - ret);

		/* Append to body of item in L[0] */
		buffer_info_init_left(tb, &bi);
		leaf_paste_in_buffer(&bi, n + tb->item_pos - ret,
				     ih_item_len(tbL0_ih), l_n, body,
				     min_t(int, l_n, tb->zeroes_num));

		/*
		 * 0-th item in S0 can be only of DIRECT type
		 * when l_n != 0
		 */
		temp_l = l_n;

		RFALSE(ih_item_len(tbS0_0_ih),
		       "PAP-12106: item length must be 0");
		RFALSE(comp_short_le_keys(&tbS0_0_ih->ih_key,
		       leaf_key(tb->L[0], n + tb->item_pos - ret)),
		       "PAP-12107: items must be of the same file");

		if (is_indirect_le_ih(tbL0_ih)) {
			int shift = tb->tb_sb->s_blocksize_bits - UNFM_P_SHIFT;
			temp_l = l_n << shift;
		}
		/* update key of first item in S0 */
		version = ih_version(tbS0_0_ih);
		add_le_key_k_offset(version, &tbS0_0_ih->ih_key, temp_l);

		/* update left delimiting key */
		left_delim_key = internal_key(tb->CFL[0], tb->lkey[0]);
		add_le_key_k_offset(version, left_delim_key, temp_l);

		/*
		 * Calculate new body, position in item and
		 * insert_size[0]
		 */
		if (l_n > tb->zeroes_num) {
			body_shift_bytes = l_n - tb->zeroes_num;
			tb->zeroes_num = 0;
		} else
			tb->zeroes_num -= l_n;
		tb->pos_in_item = 0;

		RFALSE(comp_short_le_keys(&tbS0_0_ih->ih_key,
					  leaf_key(tb->L[0],
						   B_NR_ITEMS(tb->L[0]) - 1)) ||
		       !op_is_left_mergeable(leaf_key(tbS0, 0), tbS0->b_size) ||
		       !op_is_left_mergeable(left_delim_key, tbS0->b_size),
		       "PAP-12120: item must be merge-able with left "
		       "neighboring item");
	} else {
		/* only part of the appended item will be in L[0] */

		/* Calculate position in item for append in S[0] */
		tb->pos_in_item -= tb->lbytes;

		RFALSE(tb->pos_in_item <= 0,
		       "PAP-12125: no place for paste. pos_in_item=%d",
		       tb->pos_in_item);

		/*
		 * Shift lnum[0] - 1 items in whole.
		 * Shift lbytes - 1 byte from item number lnum[0]
		 */
		leaf_shift_left(tb, tb->lnum[0], tb->lbytes);
	}
	return body_shift_bytes;
}

/* appended item will be in L[0] in whole */
static void balance_leaf_paste_left_whole(struct tree_balance *tb,
					  struct item_head * const ih,
					  const char * const body)
{
	struct buffer_head *tbS0 = PATH_PLAST_BUFFER(tb->tb_path);
	int n = B_NR_ITEMS(tb->L[0]);
	struct buffer_info bi;
	struct item_head *pasted;
	int ret;

	/* if we paste into first item of S[0] and it is left mergeable */
	if (!tb->item_pos &&
	    op_is_left_mergeable(leaf_key(tbS0, 0), tbS0->b_size)) {
		/*
		 * then increment pos_in_item by the size of the
		 * last item in L[0]
		 */
		pasted = item_head(tb->L[0], n - 1);
		if (is_direntry_le_ih(pasted))
			tb->pos_in_item += ih_entry_count(pasted);
		else
			tb->pos_in_item += ih_item_len(pasted);
	}

	/*
	 * Shift lnum[0] - 1 items in whole.
	 * Shift lbytes - 1 byte from item number lnum[0]
	 */
	ret = leaf_shift_left(tb, tb->lnum[0], tb->lbytes);

	/* Append to body of item in L[0] */
	buffer_info_init_left(tb, &bi);
	leaf_paste_in_buffer(&bi, n + tb->item_pos - ret, tb->pos_in_item,
			     tb->insert_size[0], body, tb->zeroes_num);

	/* if appended item is directory, paste entry */
	pasted = item_head(tb->L[0], n + tb->item_pos - ret);
	if (is_direntry_le_ih(pasted))
		leaf_paste_entries(&bi, n + tb->item_pos - ret,
				   tb->pos_in_item, 1,
				   (struct reiserfs_de_head *)body,
				   body + DEH_SIZE, tb->insert_size[0]);

	/*
	 * if appended item is indirect item, put unformatted node
	 * into un list
	 */
	if (is_indirect_le_ih(pasted))
		set_ih_free_space(pasted, 0);

	tb->insert_size[0] = 0;
	tb->zeroes_num = 0;
}
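
/*
 * When the paste target is the first item of S[0] and that item is
 * left-mergeable, leaf_shift_left() glues it onto the last item of
 * L[0]; pos_in_item is therefore advanced above by that last item's
 * size (entry count for a directory item, byte length otherwise) so
 * the paste still lands at the right place inside the merged item.
 */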

static unsigned int balance_leaf_paste_left(struct tree_balance *tb,
					    struct item_head * const ih,
					    const char * const body)
{
	/* we must shift the part of the appended item */
	if (tb->item_pos == tb->lnum[0] - 1 && tb->lbytes != -1)
		return balance_leaf_paste_left_shift(tb, ih, body);
	else
		balance_leaf_paste_left_whole(tb, ih, body);
	return 0;
}

/* Shift lnum[0] items from S[0] to the left neighbor L[0] */
static unsigned int balance_leaf_left(struct tree_balance *tb,
				      struct item_head * const ih,
				      const char * const body, int flag)
{
	if (tb->lnum[0] <= 0)
		return 0;

	/* new item or part of it falls into L[0], shift it too */
	if (tb->item_pos < tb->lnum[0]) {
		BUG_ON(flag != M_INSERT && flag != M_PASTE);

		if (flag == M_INSERT)
			return balance_leaf_insert_left(tb, ih, body);
		else /* M_PASTE */
			return balance_leaf_paste_left(tb, ih, body);
	} else
		/* new item doesn't fall into L[0] */
		leaf_shift_left(tb, tb->lnum[0], tb->lbytes);
	return 0;
}

static void balance_leaf_insert_right(struct tree_balance *tb,
				      struct item_head * const ih,
				      const char * const body)
{
	struct buffer_head *tbS0 = PATH_PLAST_BUFFER(tb->tb_path);
	int n = B_NR_ITEMS(tbS0);
	struct buffer_info bi;

	/* new item or part of it doesn't fall into R[0] */
	if (n - tb->rnum[0] >= tb->item_pos) {
		leaf_shift_right(tb, tb->rnum[0], tb->rbytes);
		return;
	}

	/* new item or part of it falls into R[0] */

	/* part of new item falls into R[0] */
	if (tb->item_pos == n - tb->rnum[0] + 1 && tb->rbytes != -1) {
		loff_t old_key_comp, old_len, r_zeroes_number;
		const char *r_body;
		int shift;
		loff_t offset;

		leaf_shift_right(tb, tb->rnum[0] - 1, -1);

		/* Remember key component and item length */
		old_key_comp = le_ih_k_offset(ih);
		old_len = ih_item_len(ih);

		/*
		 * Calculate key component and item length to insert
		 * into R[0]
		 */
		shift = 0;
		if (is_indirect_le_ih(ih))
			shift = tb->tb_sb->s_blocksize_bits - UNFM_P_SHIFT;
		offset = le_ih_k_offset(ih) + ((old_len - tb->rbytes) << shift);
		set_le_ih_k_offset(ih, offset);
		put_ih_item_len(ih, tb->rbytes);

		/* Insert part of the item into R[0] */
		buffer_info_init_right(tb, &bi);
		if ((old_len - tb->rbytes) > tb->zeroes_num) {
			r_zeroes_number = 0;
			r_body = body + (old_len - tb->rbytes) - tb->zeroes_num;
		} else {
			r_body = body;
			r_zeroes_number = tb->zeroes_num -
					  (old_len - tb->rbytes);
			tb->zeroes_num -= r_zeroes_number;
		}

		leaf_insert_into_buf(&bi, 0, ih, r_body, r_zeroes_number);

		/* Replace right delimiting key by first key in R[0] */
		replace_key(tb, tb->CFR[0], tb->rkey[0], tb->R[0], 0);

		/*
		 * Calculate key component and item length to
		 * insert into S[0]
		 */
		set_le_ih_k_offset(ih, old_key_comp);
		put_ih_item_len(ih, old_len - tb->rbytes);

		tb->insert_size[0] -= tb->rbytes;

	} else {
		/* whole new item falls into R[0] */

		/* Shift rnum[0]-1 items to R[0] */
		leaf_shift_right(tb, tb->rnum[0] - 1, tb->rbytes);

		/* Insert new item into R[0] */
		buffer_info_init_right(tb, &bi);
		leaf_insert_into_buf(&bi, tb->item_pos - n + tb->rnum[0] - 1,
				     ih, body, tb->zeroes_num);

		if (tb->item_pos - n + tb->rnum[0] - 1 == 0)
			replace_key(tb, tb->CFR[0], tb->rkey[0], tb->R[0], 0);

		tb->zeroes_num = tb->insert_size[0] = 0;
	}
}
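
/*
 * Offset arithmetic for the split above: the last rbytes of the new
 * item go to R[0], so the key offset of the piece inserted there is
 * advanced by the (old_len - rbytes) units that stay behind for S[0];
 * for an indirect item those units are block pointers, hence the same
 * s_blocksize_bits - UNFM_P_SHIFT conversion as on the left side.
 */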

static void balance_leaf_paste_right_shift_dirent(struct tree_balance *tb,
						  struct item_head * const ih,
						  const char * const body)
{
	struct buffer_head *tbS0 = PATH_PLAST_BUFFER(tb->tb_path);
	struct buffer_info bi;
	int entry_count;

	RFALSE(tb->zeroes_num,
	       "PAP-12145: invalid parameter in case of a directory");
	entry_count = ih_entry_count(item_head(tbS0, tb->item_pos));

	/* new directory entry falls into R[0] */
	if (entry_count - tb->rbytes < tb->pos_in_item) {
		int paste_entry_position;

		RFALSE(tb->rbytes - 1 >= entry_count || !tb->insert_size[0],
		       "PAP-12150: not enough entries to shift to R[0]: "
		       "rbytes=%d, entry_count=%d", tb->rbytes, entry_count);

		/*
		 * Shift rnum[0]-1 items in whole.
		 * Shift rbytes-1 directory entries from directory
		 * item number rnum[0]
		 */
		leaf_shift_right(tb, tb->rnum[0], tb->rbytes - 1);

		/* Paste given directory entry to directory item */
		paste_entry_position = tb->pos_in_item - entry_count +
				       tb->rbytes - 1;
		buffer_info_init_right(tb, &bi);
		leaf_paste_in_buffer(&bi, 0, paste_entry_position,
				     tb->insert_size[0], body, tb->zeroes_num);

		/* paste entry */
		leaf_paste_entries(&bi, 0, paste_entry_position, 1,
				   (struct reiserfs_de_head *) body,
				   body + DEH_SIZE, tb->insert_size[0]);

		/* change delimiting keys */
		if (paste_entry_position == 0)
			replace_key(tb, tb->CFR[0], tb->rkey[0], tb->R[0], 0);

		tb->insert_size[0] = 0;
		tb->pos_in_item++;
	} else {
		/* new directory entry doesn't fall into R[0] */
		leaf_shift_right(tb, tb->rnum[0], tb->rbytes);
	}
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 757)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 758) static void balance_leaf_paste_right_shift(struct tree_balance *tb,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 759) struct item_head * const ih,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 760) const char * const body)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 761) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 762) struct buffer_head *tbS0 = PATH_PLAST_BUFFER(tb->tb_path);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 763) int n_shift, n_rem, r_zeroes_number, version;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 764) unsigned long temp_rem;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 765) const char *r_body;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 766) struct buffer_info bi;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 767)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 768) /* we append to directory item */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 769) if (is_direntry_le_ih(item_head(tbS0, tb->item_pos))) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 770) balance_leaf_paste_right_shift_dirent(tb, ih, body);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 771) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 772) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 773)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 774) /* regular object */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 775)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 776) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 777) * Calculate number of bytes which must be shifted
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 778) * from appended item
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 779) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 780) n_shift = tb->rbytes - tb->insert_size[0];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 781) if (n_shift < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 782) n_shift = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 783)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 784) RFALSE(tb->pos_in_item != ih_item_len(item_head(tbS0, tb->item_pos)),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 785) "PAP-12155: invalid position to paste. ih_item_len=%d, "
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 786) "pos_in_item=%d", tb->pos_in_item,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 787) ih_item_len(item_head(tbS0, tb->item_pos)));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 788)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 789) leaf_shift_right(tb, tb->rnum[0], n_shift);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 790)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 791) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 792) * Calculate number of bytes which must remain in body
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 793) * after appending to R[0]
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 794) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 795) n_rem = tb->insert_size[0] - tb->rbytes;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 796) if (n_rem < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 797) n_rem = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 798)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 799) temp_rem = n_rem;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 800)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 801) version = ih_version(item_head(tb->R[0], 0));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 802)
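/*
* For an indirect item the key offset counts file bytes while n_rem
* counts item-body bytes (unformatted node pointers), so convert:
* every UNFM_P_SIZE bytes of pointers stand for one block of data.
*/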
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 803) if (is_indirect_le_key(version, leaf_key(tb->R[0], 0))) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 804) int shift = tb->tb_sb->s_blocksize_bits - UNFM_P_SHIFT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 805) temp_rem = n_rem << shift;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 806) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 807)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 808) add_le_key_k_offset(version, leaf_key(tb->R[0], 0), temp_rem);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 809) add_le_key_k_offset(version, internal_key(tb->CFR[0], tb->rkey[0]),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 810) temp_rem);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 811)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 812) do_balance_mark_internal_dirty(tb, tb->CFR[0], 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 813)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 814) /* Append part of body into R[0] */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 815) buffer_info_init_right(tb, &bi);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 816) if (n_rem > tb->zeroes_num) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 817) r_zeroes_number = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 818) r_body = body + n_rem - tb->zeroes_num;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 819) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 820) r_body = body;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 821) r_zeroes_number = tb->zeroes_num - n_rem;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 822) tb->zeroes_num -= r_zeroes_number;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 823) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 824)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 825) leaf_paste_in_buffer(&bi, 0, n_shift, tb->insert_size[0] - n_rem,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 826) r_body, r_zeroes_number);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 827)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 828) if (is_indirect_le_ih(item_head(tb->R[0], 0)))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 829) set_ih_free_space(item_head(tb->R[0], 0), 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 830)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 831) tb->insert_size[0] = n_rem;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 832) if (!n_rem)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 833) tb->pos_in_item++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 834) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 835)
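/*
* Paste when the target item moves to R[0] in whole: shift rnum[0]
* items (and rbytes of the boundary item) right, then paste body into
* the shifted item inside R[0].  For a directory item the new entry is
* inserted as well, and pasting before the 0-th entry of R[0] forces a
* delimiting key update.
*/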
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 836) static void balance_leaf_paste_right_whole(struct tree_balance *tb,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 837) struct item_head * const ih,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 838) const char * const body)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 839) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 840) struct buffer_head *tbS0 = PATH_PLAST_BUFFER(tb->tb_path);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 841) int n = B_NR_ITEMS(tbS0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 842) struct item_head *pasted;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 843) struct buffer_info bi;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 844)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 845) buffer_info_init_right(tb, &bi);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 846) leaf_shift_right(tb, tb->rnum[0], tb->rbytes);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 847)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 848) /* append item in R[0] */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 849) if (tb->pos_in_item >= 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 850) buffer_info_init_right(tb, &bi);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 851) leaf_paste_in_buffer(&bi, tb->item_pos - n + tb->rnum[0],
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 852) tb->pos_in_item, tb->insert_size[0], body,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 853) tb->zeroes_num);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 854) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 855)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 856) /* paste new entry, if item is directory item */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 857) pasted = item_head(tb->R[0], tb->item_pos - n + tb->rnum[0]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 858) if (is_direntry_le_ih(pasted) && tb->pos_in_item >= 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 859) leaf_paste_entries(&bi, tb->item_pos - n + tb->rnum[0],
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 860) tb->pos_in_item, 1,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 861) (struct reiserfs_de_head *)body,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 862) body + DEH_SIZE, tb->insert_size[0]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 863)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 864) if (!tb->pos_in_item) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 865)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 866) RFALSE(tb->item_pos - n + tb->rnum[0],
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 867) "PAP-12165: directory item must be first "
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 868) "item of node when pasting is in 0th position");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 869)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 870) /* update delimiting keys */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 871) replace_key(tb, tb->CFR[0], tb->rkey[0], tb->R[0], 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 872) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 873) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 874)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 875) if (is_indirect_le_ih(pasted))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 876) set_ih_free_space(pasted, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 877) tb->zeroes_num = tb->insert_size[0] = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 878) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 879)
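/*
* Paste during the right shift: if the target item stays in S[0] only
* the shift is done here; otherwise the paste is performed either on
* the boundary item that is being split or on the item that moved to
* R[0] in whole.
*/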
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 880) static void balance_leaf_paste_right(struct tree_balance *tb,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 881) struct item_head * const ih,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 882) const char * const body)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 883) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 884) struct buffer_head *tbS0 = PATH_PLAST_BUFFER(tb->tb_path);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 885) int n = B_NR_ITEMS(tbS0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 886)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 887) /* new item doesn't fall into R[0] */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 888) if (n - tb->rnum[0] > tb->item_pos) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 889) leaf_shift_right(tb, tb->rnum[0], tb->rbytes);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 890) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 891) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 892)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 893) /* pasted item or part of it falls to R[0] */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 894)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 895) if (tb->item_pos == n - tb->rnum[0] && tb->rbytes != -1)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 896) /* we must shift part of the appended item */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 897) balance_leaf_paste_right_shift(tb, ih, body);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 898) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 899) /* pasted item falls wholly into R[0] */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 900) balance_leaf_paste_right_whole(tb, ih, body);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 901) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 902)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 903) /* shift rnum[0] items from S[0] to the right neighbor R[0] */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 904) static void balance_leaf_right(struct tree_balance *tb,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 905) struct item_head * const ih,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 906) const char * const body, int flag)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 907) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 908) if (tb->rnum[0] <= 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 909) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 910)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 911) BUG_ON(flag != M_INSERT && flag != M_PASTE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 912)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 913) if (flag == M_INSERT)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 914) balance_leaf_insert_right(tb, ih, body);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 915) else /* M_PASTE */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 916) balance_leaf_paste_right(tb, ih, body);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 917) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 918)
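/*
* Insert a new item while filling S_new[i]: either only existing items
* are moved and the new item stays behind for S[0], or the new item is
* split on the node boundary (its tail becomes item 0 of S_new[i]), or
* it is inserted into S_new[i] in whole.
*/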
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 919) static void balance_leaf_new_nodes_insert(struct tree_balance *tb,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 920) struct item_head * const ih,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 921) const char * const body,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 922) struct item_head *insert_key,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 923) struct buffer_head **insert_ptr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 924) int i)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 925) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 926) struct buffer_head *tbS0 = PATH_PLAST_BUFFER(tb->tb_path);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 927) int n = B_NR_ITEMS(tbS0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 928) struct buffer_info bi;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 929) int shift;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 930)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 931) /* new item or its part does not fall into S_new[i] */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 932) if (n - tb->snum[i] >= tb->item_pos) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 933) leaf_move_items(LEAF_FROM_S_TO_SNEW, tb,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 934) tb->snum[i], tb->sbytes[i], tb->S_new[i]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 935) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 936) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 937)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 938) /* new item or its part falls into the first new node S_new[i] */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 939)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 940) /* part of new item falls into S_new[i] */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 941) if (tb->item_pos == n - tb->snum[i] + 1 && tb->sbytes[i] != -1) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 942) int old_key_comp, old_len, r_zeroes_number;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 943) const char *r_body;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 944)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 945) /* Move snum[i]-1 items from S[0] to S_new[i] */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 946) leaf_move_items(LEAF_FROM_S_TO_SNEW, tb, tb->snum[i] - 1, -1,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 947) tb->S_new[i]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 948)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 949) /* Remember key component and item length */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 950) old_key_comp = le_ih_k_offset(ih);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 951) old_len = ih_item_len(ih);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 952)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 953) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 954) * Calculate key component and item length to insert
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 955) * into S_new[i]
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 956) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 957) shift = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 958) if (is_indirect_le_ih(ih))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 959) shift = tb->tb_sb->s_blocksize_bits - UNFM_P_SHIFT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 960) set_le_ih_k_offset(ih,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 961) le_ih_k_offset(ih) +
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 962) ((old_len - tb->sbytes[i]) << shift));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 963)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 964) put_ih_item_len(ih, tb->sbytes[i]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 965)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 966) /* Insert part of the item into S_new[i] before 0-th item */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 967) buffer_info_init_bh(tb, &bi, tb->S_new[i]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 968)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 969) if ((old_len - tb->sbytes[i]) > tb->zeroes_num) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 970) r_zeroes_number = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 971) r_body = body + (old_len - tb->sbytes[i]) -
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 972) tb->zeroes_num;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 973) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 974) r_body = body;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 975) r_zeroes_number = tb->zeroes_num - (old_len -
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 976) tb->sbytes[i]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 977) tb->zeroes_num -= r_zeroes_number;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 978) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 979)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 980) leaf_insert_into_buf(&bi, 0, ih, r_body, r_zeroes_number);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 981)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 982) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 983) * Calculate key component and item length to
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 984) * insert into S[i]
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 985) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 986) set_le_ih_k_offset(ih, old_key_comp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 987) put_ih_item_len(ih, old_len - tb->sbytes[i]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 988) tb->insert_size[0] -= tb->sbytes[i];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 989) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 990) /* whole new item falls into S_new[i] */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 991)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 992) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 993) * Shift snum[i] - 1 items to S_new[i]
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 994) * (and sbytes[i] bytes of the split item)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 995) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 996) leaf_move_items(LEAF_FROM_S_TO_SNEW, tb,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 997) tb->snum[i] - 1, tb->sbytes[i], tb->S_new[i]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 998)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 999) /* Insert new item into S_new[i] */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1000) buffer_info_init_bh(tb, &bi, tb->S_new[i]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1001) leaf_insert_into_buf(&bi, tb->item_pos - n + tb->snum[i] - 1,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1002) ih, body, tb->zeroes_num);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1003)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1004) tb->zeroes_num = tb->insert_size[0] = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1005) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1006) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1007)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1008) /* we append to directory item */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1009) static void balance_leaf_new_nodes_paste_dirent(struct tree_balance *tb,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1010) struct item_head * const ih,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1011) const char * const body,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1012) struct item_head *insert_key,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1013) struct buffer_head **insert_ptr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1014) int i)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1015) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1016) struct buffer_head *tbS0 = PATH_PLAST_BUFFER(tb->tb_path);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1017) struct item_head *aux_ih = item_head(tbS0, tb->item_pos);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1018) int entry_count = ih_entry_count(aux_ih);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1019) struct buffer_info bi;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1020)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1021) if (entry_count - tb->sbytes[i] < tb->pos_in_item &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1022) tb->pos_in_item <= entry_count) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1023) /* new directory entry falls into S_new[i] */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1024)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1025) RFALSE(!tb->insert_size[0],
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1026) "PAP-12215: insert_size is already 0");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1027) RFALSE(tb->sbytes[i] - 1 >= entry_count,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1028) "PAP-12220: there are not that many entries (%d), only %d",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1029) tb->sbytes[i] - 1, entry_count);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1030)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1031) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1032) * Shift snum[i]-1 items in whole.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1033) * Shift sbytes[i] directory entries
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1034) * from directory item number snum[i]
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1035) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1036) leaf_move_items(LEAF_FROM_S_TO_SNEW, tb, tb->snum[i],
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1037) tb->sbytes[i] - 1, tb->S_new[i]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1038)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1039) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1040) * Paste given directory entry to
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1041) * directory item
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1042) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1043) buffer_info_init_bh(tb, &bi, tb->S_new[i]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1044) leaf_paste_in_buffer(&bi, 0, tb->pos_in_item - entry_count +
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1045) tb->sbytes[i] - 1, tb->insert_size[0],
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1046) body, tb->zeroes_num);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1047)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1048) /* paste new directory entry */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1049) leaf_paste_entries(&bi, 0, tb->pos_in_item - entry_count +
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1050) tb->sbytes[i] - 1, 1,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1051) (struct reiserfs_de_head *) body,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1052) body + DEH_SIZE, tb->insert_size[0]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1053)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1054) tb->insert_size[0] = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1055) tb->pos_in_item++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1056) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1057) /* new directory entry doesn't fall into S_new[i] */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1058) leaf_move_items(LEAF_FROM_S_TO_SNEW, tb, tb->snum[i],
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1059) tb->sbytes[i], tb->S_new[i]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1060) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1061)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1062) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1063)
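/*
* Paste when the appended-to item is split between S[0] and S_new[i]:
* like balance_leaf_paste_right_shift(), but the shifted tail of the
* item and the trailing bytes of body go into the new node, while the
* n_rem bytes that do not fit are left in insert_size[0] for the final
* paste into S[0].
*/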
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1064) static void balance_leaf_new_nodes_paste_shift(struct tree_balance *tb,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1065) struct item_head * const ih,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1066) const char * const body,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1067) struct item_head *insert_key,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1068) struct buffer_head **insert_ptr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1069) int i)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1070) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1071) struct buffer_head *tbS0 = PATH_PLAST_BUFFER(tb->tb_path);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1072) struct item_head *aux_ih = item_head(tbS0, tb->item_pos);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1073) int n_shift, n_rem, r_zeroes_number, shift;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1074) const char *r_body;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1075) struct item_head *tmp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1076) struct buffer_info bi;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1077)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1078) RFALSE(ih, "PAP-12210: ih must be 0");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1079)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1080) if (is_direntry_le_ih(aux_ih)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1081) balance_leaf_new_nodes_paste_dirent(tb, ih, body, insert_key,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1082) insert_ptr, i);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1083) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1084) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1085)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1086) /* regular object */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1087)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1089) RFALSE(tb->pos_in_item != ih_item_len(item_head(tbS0, tb->item_pos)) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1090) tb->insert_size[0] <= 0,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1091) "PAP-12225: item too short or insert_size <= 0");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1092)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1093) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1094) * Calculate number of bytes which must be shifted from appended item
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1095) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1096) n_shift = tb->sbytes[i] - tb->insert_size[0];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1097) if (n_shift < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1098) n_shift = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1099) leaf_move_items(LEAF_FROM_S_TO_SNEW, tb, tb->snum[i], n_shift,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1100) tb->S_new[i]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1101)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1102) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1103) * Calculate number of bytes which must remain in body after
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1104) * append to S_new[i]
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1105) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1106) n_rem = tb->insert_size[0] - tb->sbytes[i];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1107) if (n_rem < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1108) n_rem = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1109)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1110) /* Append part of body into S_new[i] */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1111) buffer_info_init_bh(tb, &bi, tb->S_new[i]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1112) if (n_rem > tb->zeroes_num) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1113) r_zeroes_number = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1114) r_body = body + n_rem - tb->zeroes_num;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1115) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1116) r_body = body;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1117) r_zeroes_number = tb->zeroes_num - n_rem;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1118) tb->zeroes_num -= r_zeroes_number;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1119) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1120)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1121) leaf_paste_in_buffer(&bi, 0, n_shift, tb->insert_size[0] - n_rem,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1122) r_body, r_zeroes_number);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1123)
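/*
* Advance the key offset of S_new[i]'s first item past the n_rem bytes
* that stay in S[0]; for an indirect item convert to file-byte units
* and reset ih_free_space.
*/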
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1124) tmp = item_head(tb->S_new[i], 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1125) shift = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1126) if (is_indirect_le_ih(tmp)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1127) set_ih_free_space(tmp, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1128) shift = tb->tb_sb->s_blocksize_bits - UNFM_P_SHIFT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1129) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1130) add_le_ih_k_offset(tmp, n_rem << shift);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1131)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1132) tb->insert_size[0] = n_rem;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1133) if (!n_rem)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1134) tb->pos_in_item++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1135) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1136)
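/*
* Paste when the target item moves to S_new[i] in whole: move snum[i]
* items (and sbytes[i] bytes of the boundary item) to the new node,
* then paste body into the moved item there.  Directory entries get
* their entry header inserted, and an indirect item has ih_free_space
* reset.
*/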
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1137) static void balance_leaf_new_nodes_paste_whole(struct tree_balance *tb,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1138) struct item_head * const ih,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1139) const char * const body,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1140) struct item_head *insert_key,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1141) struct buffer_head **insert_ptr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1142) int i)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1144) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1145) struct buffer_head *tbS0 = PATH_PLAST_BUFFER(tb->tb_path);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1146) int n = B_NR_ITEMS(tbS0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1147) int leaf_mi;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1148) struct item_head *pasted;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1149) struct buffer_info bi;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1150)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1151) #ifdef CONFIG_REISERFS_CHECK
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1152) struct item_head *ih_check = item_head(tbS0, tb->item_pos);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1153)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1154) if (!is_direntry_le_ih(ih_check) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1155) (tb->pos_in_item != ih_item_len(ih_check) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1156) tb->insert_size[0] <= 0))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1157) reiserfs_panic(tb->tb_sb,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1158) "PAP-12235",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1159) "pos_in_item must be equal to ih_item_len");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1160) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1161)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1162) leaf_mi = leaf_move_items(LEAF_FROM_S_TO_SNEW, tb, tb->snum[i],
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1163) tb->sbytes[i], tb->S_new[i]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1164)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1165) RFALSE(leaf_mi,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1166) "PAP-12240: unexpected value returned by leaf_move_items (%d)",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1167) leaf_mi);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1168)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1169) /* paste into item */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1170) buffer_info_init_bh(tb, &bi, tb->S_new[i]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1171) leaf_paste_in_buffer(&bi, tb->item_pos - n + tb->snum[i],
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1172) tb->pos_in_item, tb->insert_size[0],
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1173) body, tb->zeroes_num);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1174)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1175) pasted = item_head(tb->S_new[i], tb->item_pos - n +
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1176) tb->snum[i]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1177) if (is_direntry_le_ih(pasted))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1178) leaf_paste_entries(&bi, tb->item_pos - n + tb->snum[i],
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1179) tb->pos_in_item, 1,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1180) (struct reiserfs_de_head *)body,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1181) body + DEH_SIZE, tb->insert_size[0]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1182)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1183) /* if we paste to indirect item update ih_free_space */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1184) if (is_indirect_le_ih(pasted))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1185) set_ih_free_space(pasted, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1186)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1187) tb->zeroes_num = tb->insert_size[0] = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1189) }
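
/*
* Paste while filling new nodes: either the target item stays entirely
* in S[0] and only whole items are moved to S_new[i], or the paste is
* done on the boundary item that is being split, or on an item that
* moved to S_new[i] in whole.
*/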
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1190) static void balance_leaf_new_nodes_paste(struct tree_balance *tb,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1191) struct item_head * const ih,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1192) const char * const body,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1193) struct item_head *insert_key,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1194) struct buffer_head **insert_ptr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1195) int i)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1196) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1197) struct buffer_head *tbS0 = PATH_PLAST_BUFFER(tb->tb_path);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1198) int n = B_NR_ITEMS(tbS0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1199)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1200) /* pasted item doesn't fall into S_new[i] */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1201) if (n - tb->snum[i] > tb->item_pos) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1202) leaf_move_items(LEAF_FROM_S_TO_SNEW, tb,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1203) tb->snum[i], tb->sbytes[i], tb->S_new[i]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1204) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1205) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1206)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1207) /* pasted item or part of it falls into S_new[i] */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1208)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1209) if (tb->item_pos == n - tb->snum[i] && tb->sbytes[i] != -1)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1210) /* we must shift part of the appended item */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1211) balance_leaf_new_nodes_paste_shift(tb, ih, body, insert_key,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1212) insert_ptr, i);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1213) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1214) /* item falls wholly into S_new[i] */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1215) balance_leaf_new_nodes_paste_whole(tb, ih, body, insert_key,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1216) insert_ptr, i);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1217) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1218)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1219) /* Fill new nodes that appear in place of S[0] */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1220) static void balance_leaf_new_nodes(struct tree_balance *tb,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1221) struct item_head * const ih,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1222) const char * const body,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1223) struct item_head *insert_key,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1224) struct buffer_head **insert_ptr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1225) int flag)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1226) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1227) int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1228) for (i = tb->blknum[0] - 2; i >= 0; i--) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1229) BUG_ON(flag != M_INSERT && flag != M_PASTE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1230)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1231) RFALSE(!tb->snum[i],
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1232) "PAP-12200: snum[%d] == %d. Must be > 0", i,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1233) tb->snum[i]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1234)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1235) /* here we shift from S to S_new nodes */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1236)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1237) tb->S_new[i] = get_FEB(tb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1238)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1239) /* initialize block type and tree level */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1240) set_blkh_level(B_BLK_HEAD(tb->S_new[i]), DISK_LEAF_NODE_LEVEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1241)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1242) if (flag == M_INSERT)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1243) balance_leaf_new_nodes_insert(tb, ih, body, insert_key,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1244) insert_ptr, i);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1245) else /* M_PASTE */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1246) balance_leaf_new_nodes_paste(tb, ih, body, insert_key,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1247) insert_ptr, i);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1248)
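/*
* Record the new node and its leftmost key; they are passed back to
* the caller so a corresponding key/pointer pair can be inserted at
* the next level of the tree.
*/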
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1249) memcpy(insert_key + i, leaf_key(tb->S_new[i], 0), KEY_SIZE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1250) insert_ptr[i] = tb->S_new[i];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1251)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1252) RFALSE(!buffer_journaled(tb->S_new[i])
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1253) || buffer_journal_dirty(tb->S_new[i])
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1254) || buffer_dirty(tb->S_new[i]),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1255) "PAP-12247: S_new[%d] : (%b)",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1256) i, tb->S_new[i]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1257) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1258) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1259)
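/*
* Finish an insert that lands in what is left of S[0]: put the new
* item at item_pos and, if it became item 0 of the node, propagate its
* key to the parent through CFL[0].
*/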
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1260) static void balance_leaf_finish_node_insert(struct tree_balance *tb,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1261) struct item_head * const ih,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1262) const char * const body)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1263) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1264) struct buffer_head *tbS0 = PATH_PLAST_BUFFER(tb->tb_path);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1265) struct buffer_info bi;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1266) buffer_info_init_tbS0(tb, &bi);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1267) leaf_insert_into_buf(&bi, tb->item_pos, ih, body, tb->zeroes_num);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1268)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1269) /* If we insert the first key change the delimiting key */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1270) if (tb->item_pos == 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1271) if (tb->CFL[0]) /* can be 0 in reiserfsck */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1272) replace_key(tb, tb->CFL[0], tb->lkey[0], tbS0, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1273)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1274) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1275) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1276)
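/*
* Finish a paste of a new directory entry into the item that stayed in
* S[0]; pasting before entry 0 of item 0 also updates the left
* delimiting key.
*/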
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1277) static void balance_leaf_finish_node_paste_dirent(struct tree_balance *tb,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1278) struct item_head * const ih,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1279) const char * const body)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1280) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1281) struct buffer_head *tbS0 = PATH_PLAST_BUFFER(tb->tb_path);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1282) struct item_head *pasted = item_head(tbS0, tb->item_pos);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1283) struct buffer_info bi;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1284)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1285) if (tb->pos_in_item >= 0 && tb->pos_in_item <= ih_entry_count(pasted)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1286) RFALSE(!tb->insert_size[0],
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1287) "PAP-12260: insert_size is 0 already");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1288)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1289) /* prepare space */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1290) buffer_info_init_tbS0(tb, &bi);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1291) leaf_paste_in_buffer(&bi, tb->item_pos, tb->pos_in_item,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1292) tb->insert_size[0], body, tb->zeroes_num);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1293)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1294) /* paste entry */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1295) leaf_paste_entries(&bi, tb->item_pos, tb->pos_in_item, 1,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1296) (struct reiserfs_de_head *)body,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1297) body + DEH_SIZE, tb->insert_size[0]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1298)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1299) if (!tb->item_pos && !tb->pos_in_item) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1300) RFALSE(!tb->CFL[0] || !tb->L[0],
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1301) "PAP-12270: CFL[0]/L[0] must be specified");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1302) if (tb->CFL[0])
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1303) replace_key(tb, tb->CFL[0], tb->lkey[0],
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1304) tbS0, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1305) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1306)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1307) tb->insert_size[0] = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1308) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1309) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1310)
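/*
* Finish a paste into the item that stayed in S[0]: append the
* remaining insert_size[0] bytes at the end of the item (directory
* items are handled by the helper above).  Anything left over at this
* point is a bug and triggers a panic under CONFIG_REISERFS_CHECK.
*/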
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1311) static void balance_leaf_finish_node_paste(struct tree_balance *tb,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1312) struct item_head * const ih,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1313) const char * const body)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1314) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1315) struct buffer_head *tbS0 = PATH_PLAST_BUFFER(tb->tb_path);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1316) struct buffer_info bi;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1317) struct item_head *pasted = item_head(tbS0, tb->item_pos);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1318)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1319) /* for a directory item the new entry may already have been pasted */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1320) if (is_direntry_le_ih(pasted)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1321) balance_leaf_finish_node_paste_dirent(tb, ih, body);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1322) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1323) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1324)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1325) /* regular object */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1326)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1327) if (tb->pos_in_item == ih_item_len(pasted)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1328) RFALSE(tb->insert_size[0] <= 0,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1329) "PAP-12275: insert size must not be %d",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1330) tb->insert_size[0]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1331) buffer_info_init_tbS0(tb, &bi);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1332) leaf_paste_in_buffer(&bi, tb->item_pos,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1333) tb->pos_in_item, tb->insert_size[0], body,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1334) tb->zeroes_num);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1335)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1336) if (is_indirect_le_ih(pasted))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1337) set_ih_free_space(pasted, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1338)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1339) tb->insert_size[0] = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1340) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1341) #ifdef CONFIG_REISERFS_CHECK
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1342) else if (tb->insert_size[0]) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1343) print_cur_tb("12285");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1344) reiserfs_panic(tb->tb_sb, "PAP-12285",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1345) "insert_size must be 0 (%d)", tb->insert_size[0]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1346) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1347) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1348) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1349)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1350) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1351) * if the affected item was not wholly shifted then we
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1352) * perform all necessary operations on that part or whole
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1353) * of the affected item which remains in S
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1354) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1355) static void balance_leaf_finish_node(struct tree_balance *tb,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1356) struct item_head * const ih,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1357) const char * const body, int flag)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1358) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1359) /* if we must insert or append into buffer S[0] */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1360) if (0 <= tb->item_pos && tb->item_pos < tb->s0num) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1361) if (flag == M_INSERT)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1362) balance_leaf_finish_node_insert(tb, ih, body);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1363) else /* M_PASTE */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1364) balance_leaf_finish_node_paste(tb, ih, body);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1365) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1366) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1367)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1368) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1369) * balance_leaf - reiserfs tree balancing algorithm
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1370) * @tb: tree balance state
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1371) * @ih: item header of inserted item (little endian)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1372) * @body: body of inserted item or bytes to paste
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1373) * @flag: i - insert, d - delete, c - cut, p - paste (see do_balance)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1374) * passed back:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1375) * @insert_key: key to insert new nodes
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1376) * @insert_ptr: array of nodes to insert at the next level
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1377) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1378) * In our processing of one level we sometimes determine what must be
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1379) * inserted into the next higher level. This insertion consists of a
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1380) * key or two keys and their corresponding pointers.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1381) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1382) static int balance_leaf(struct tree_balance *tb, struct item_head *ih,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1383) const char *body, int flag,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1384) struct item_head *insert_key,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1385) struct buffer_head **insert_ptr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1386) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1387) struct buffer_head *tbS0 = PATH_PLAST_BUFFER(tb->tb_path);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1388)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1389) PROC_INFO_INC(tb->tb_sb, balance_at[0]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1390)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1391) /* insert_size[0] < 0 means delete or cut; balance that case separately */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1392) if (tb->insert_size[0] < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1393) return balance_leaf_when_delete(tb, flag);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1394)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1395) tb->item_pos = PATH_LAST_POSITION(tb->tb_path);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1396) tb->pos_in_item = tb->tb_path->pos_in_item;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1397) tb->zeroes_num = 0;
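/* a NULL body on insert means the new item is filled with zeroes */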
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1398) if (flag == M_INSERT && !body)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1399) tb->zeroes_num = ih_item_len(ih);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1400)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1401) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1402) * for indirect item pos_in_item is measured in unformatted node
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1403) * pointers. Recalculate to bytes
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1404) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1405) if (flag != M_INSERT
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1406) && is_indirect_le_ih(item_head(tbS0, tb->item_pos)))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1407) tb->pos_in_item *= UNFM_P_SIZE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1408)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1409) body += balance_leaf_left(tb, ih, body, flag);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1410)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1411) /* tb->lnum[0] > 0 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1412) /* Calculate new item position */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1413) tb->item_pos -= (tb->lnum[0] - ((tb->lbytes != -1) ? 1 : 0));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1414)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1415) balance_leaf_right(tb, ih, body, flag);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1416)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1417) /* tb->rnum[0] > 0 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1418) RFALSE(tb->blknum[0] > 3,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1419) "PAP-12180: blknum can not be %d. It must be <= 3", tb->blknum[0]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1420) RFALSE(tb->blknum[0] < 0,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1421) "PAP-12185: blknum can not be %d. It must be >= 0", tb->blknum[0]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1422)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1423) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1424) * If, while adding to a node, we discover that it can be split in two,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1425) * the left part merged into the left neighbor and the right part into
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1426) * the right neighbor, then the node itself is eliminated (blknum == 0).
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1427) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1428) if (tb->blknum[0] == 0) { /* node S[0] is empty now */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1429)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1430) RFALSE(!tb->lnum[0] || !tb->rnum[0],
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1431) "PAP-12190: lnum and rnum must not be zero");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1432) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1433) * if insertion was done before the 0-th position in R[0], the right
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1434) * delimiting key of tb->L[0] is stale: it must now equal the left
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1435) * delimiting key of R[0], so copy it from CFR[0]
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1436) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1437) if (tb->CFL[0]) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1438) if (!tb->CFR[0])
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1439) reiserfs_panic(tb->tb_sb, "vs-12195",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1440) "CFR not initialized");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1441) copy_key(internal_key(tb->CFL[0], tb->lkey[0]),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1442) internal_key(tb->CFR[0], tb->rkey[0]));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1443) do_balance_mark_internal_dirty(tb, tb->CFL[0], 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1444) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1445)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1446) reiserfs_invalidate_buffer(tb, tbS0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1447) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1448) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1449)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1450) balance_leaf_new_nodes(tb, ih, body, insert_key, insert_ptr, flag);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1451)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1452) balance_leaf_finish_node(tb, ih, body, flag);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1453)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1454) #ifdef CONFIG_REISERFS_CHECK
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1455) if (flag == M_PASTE && tb->insert_size[0]) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1456) print_cur_tb("12290");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1457) reiserfs_panic(tb->tb_sb,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1458) "PAP-12290", "insert_size is still not 0 (%d)",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1459) tb->insert_size[0]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1460) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1461) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1462)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1463) /* Leaf level of the tree is balanced (end of balance_leaf) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1464) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1465) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1466)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1467) /* Make empty node */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1468) void make_empty_node(struct buffer_info *bi)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1469) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1470) struct block_head *blkh;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1471)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1472) RFALSE(bi->bi_bh == NULL, "PAP-12295: pointer to the buffer is NULL");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1473)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1474) blkh = B_BLK_HEAD(bi->bi_bh);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1475) set_blkh_nr_item(blkh, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1476) set_blkh_free_space(blkh, MAX_CHILD_SIZE(bi->bi_bh));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1477)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1478) if (bi->bi_parent)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1479) B_N_CHILD(bi->bi_parent, bi->bi_position)->dc_size = 0; /* Endian safe if 0 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1480) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1481)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1482) /* Get first empty buffer */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1483) struct buffer_head *get_FEB(struct tree_balance *tb)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1484) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1485) int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1486) struct buffer_info bi;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1487)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1488) for (i = 0; i < MAX_FEB_SIZE; i++)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1489) if (tb->FEB[i] != NULL)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1490) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1491)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1492) if (i == MAX_FEB_SIZE)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1493) reiserfs_panic(tb->tb_sb, "vs-12300", "FEB list is empty");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1494)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1495) buffer_info_init_bh(tb, &bi, tb->FEB[i]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1496) make_empty_node(&bi);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1497) set_buffer_uptodate(tb->FEB[i]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1498) tb->used[i] = tb->FEB[i];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1499) tb->FEB[i] = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1500)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1501) return tb->used[i];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1502) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1503)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1504) /* This is now used because reiserfs_free_block has to be able to schedule. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1505) static void store_thrown(struct tree_balance *tb, struct buffer_head *bh)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1506) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1507) int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1508)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1509) if (buffer_dirty(bh))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1510) reiserfs_warning(tb->tb_sb, "reiserfs-12320",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1511) "called with dirty buffer");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1512) for (i = 0; i < ARRAY_SIZE(tb->thrown); i++)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1513) if (!tb->thrown[i]) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1514) tb->thrown[i] = bh;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1515) get_bh(bh); /* free_thrown puts this */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1516) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1517) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1518) reiserfs_warning(tb->tb_sb, "reiserfs-12321",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1519) "too many thrown buffers");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1520) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1521)
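/*
* Release the buffers stashed by store_thrown() and free their disk
* blocks; freeing is deferred to here because reiserfs_free_block()
* may schedule.
*/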
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1522) static void free_thrown(struct tree_balance *tb)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1523) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1524) int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1525) b_blocknr_t blocknr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1526) for (i = 0; i < ARRAY_SIZE(tb->thrown); i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1527) if (tb->thrown[i]) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1528) blocknr = tb->thrown[i]->b_blocknr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1529) if (buffer_dirty(tb->thrown[i]))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1530) reiserfs_warning(tb->tb_sb, "reiserfs-12322",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1531) "called with dirty buffer %d",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1532) blocknr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1533) brelse(tb->thrown[i]); /* incremented in store_thrown */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1534) reiserfs_free_block(tb->transaction_handle, NULL,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1535) blocknr, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1536) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1537) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1538) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1539)
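/*
* Take a node that dropped out of the tree out of service: mark its
* block head as free, forget its dirty state and hand it to
* store_thrown() so the block can be released by free_thrown() later.
*/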
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1540) void reiserfs_invalidate_buffer(struct tree_balance *tb, struct buffer_head *bh)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1541) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1542) struct block_head *blkh;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1543) blkh = B_BLK_HEAD(bh);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1544) set_blkh_level(blkh, FREE_LEVEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1545) set_blkh_nr_item(blkh, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1546)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1547) clear_buffer_dirty(bh);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1548) store_thrown(tb, bh);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1549) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1550)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1551) /* Replace n_dest'th key in buffer dest by n_src'th key of buffer src.*/
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1552) void replace_key(struct tree_balance *tb, struct buffer_head *dest, int n_dest,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1553) struct buffer_head *src, int n_src)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1554) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1555)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1556) RFALSE(dest == NULL || src == NULL,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1557) "vs-12305: source or destination buffer is 0 (src=%p, dest=%p)",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1558) src, dest);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1559) RFALSE(!B_IS_KEYS_LEVEL(dest),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1560) "vs-12310: invalid level (%z) for destination buffer. dest must be leaf",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1561) dest);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1562) RFALSE(n_dest < 0 || n_src < 0,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1563) "vs-12315: src(%d) or dest(%d) key number < 0", n_src, n_dest);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1564) RFALSE(n_dest >= B_NR_ITEMS(dest) || n_src >= B_NR_ITEMS(src),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1565) "vs-12320: src(%d(%d)) or dest(%d(%d)) key number is too big",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1566) n_src, B_NR_ITEMS(src), n_dest, B_NR_ITEMS(dest));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1567)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1568) if (B_IS_ITEMS_LEVEL(src))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1569) /* source buffer contains leaf node */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1570) memcpy(internal_key(dest, n_dest), item_head(src, n_src),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1571) KEY_SIZE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1572) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1573) memcpy(internal_key(dest, n_dest), internal_key(src, n_src),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1574) KEY_SIZE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1575)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1576) do_balance_mark_internal_dirty(tb, dest, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1577) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1578)
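/*
 * Return the position of S[h]'s left neighbor within its parent FL[h].
 * If S[h] is the leftmost child of its parent, the left neighbor is the
 * last child of FL[h]; otherwise it is the child just to the left of
 * S[h] in the same parent.
 */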
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1579) int get_left_neighbor_position(struct tree_balance *tb, int h)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1580) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1581) int Sh_position = PATH_H_POSITION(tb->tb_path, h + 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1582)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1583) RFALSE(PATH_H_PPARENT(tb->tb_path, h) == NULL || tb->FL[h] == NULL,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1584) "vs-12325: FL[%d](%p) or F[%d](%p) does not exist",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1585) h, tb->FL[h], h, PATH_H_PPARENT(tb->tb_path, h));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1586)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1587) if (Sh_position == 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1588) return B_NR_ITEMS(tb->FL[h]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1589) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1590) return Sh_position - 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1591) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1592)
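/*
 * Return the position of S[h]'s right neighbor within its parent FR[h].
 * If S[h] is the rightmost child of its parent, the right neighbor is
 * the first child (position 0) of FR[h]; otherwise it is the child just
 * to the right of S[h] in the same parent.
 */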
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1593) int get_right_neighbor_position(struct tree_balance *tb, int h)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1594) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1595) int Sh_position = PATH_H_POSITION(tb->tb_path, h + 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1596)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1597) RFALSE(PATH_H_PPARENT(tb->tb_path, h) == NULL || tb->FR[h] == NULL,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1598) "vs-12330: F[%d](%p) or FR[%d](%p) does not exist",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1599) h, PATH_H_PPARENT(tb->tb_path, h), h, tb->FR[h]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1600)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1601) if (Sh_position == B_NR_ITEMS(PATH_H_PPARENT(tb->tb_path, h)))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1602) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1603) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1604) return Sh_position + 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1605) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1606)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1607) #ifdef CONFIG_REISERFS_CHECK
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1608)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1609) int is_reusable(struct super_block *s, b_blocknr_t block, int bit_value);
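
/*
 * Debugging aid: verify that an internal node modified by balancing is
 * dirty or journaled and that each of its nr_items + 1 child pointers
 * refers to a usable block number; panic on a bad child pointer.
 * Nothing is checked if the buffer is absent or no longer in the tree.
 */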
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1610) static void check_internal_node(struct super_block *s, struct buffer_head *bh,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1611) char *mes)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1612) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1613) struct disk_child *dc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1614) int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1615)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1616) RFALSE(!bh, "PAP-12336: bh == 0");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1617)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1618) if (!bh || !B_IS_IN_TREE(bh))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1619) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1620)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1621) RFALSE(!buffer_dirty(bh) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1622) !(buffer_journaled(bh) || buffer_journal_dirty(bh)),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1623) "PAP-12337: buffer (%b) must be dirty", bh);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1624) dc = B_N_CHILD(bh, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1625)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1626) for (i = 0; i <= B_NR_ITEMS(bh); i++, dc++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1627) if (!is_reusable(s, dc_block_number(dc), 1)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1628) print_cur_tb(mes);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1629) reiserfs_panic(s, "PAP-12338",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1630) "invalid child pointer %y in %b",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1631) dc, bh);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1632) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1633) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1634) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1635)
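/*
 * Debugging aid: warn and return 1 if the buffer is locked without
 * having been prepared for the journal, or if it is no longer part of
 * the tree; return 0 if the buffer is safe to modify.
 */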
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1636) static int locked_or_not_in_tree(struct tree_balance *tb,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1637) struct buffer_head *bh, char *which)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1638) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1639) if ((!buffer_journal_prepared(bh) && buffer_locked(bh)) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1640) !B_IS_IN_TREE(bh)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1641) reiserfs_warning(tb->tb_sb, "vs-12339", "%s (%b)", which, bh);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1642) return 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1643) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1644) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1645) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1646)
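/*
 * Debugging aid: panic if another balance already appears to be in
 * progress on this superblock, then verify that every buffer we are
 * about to modify is unlocked and still in the tree.  Returns non-zero
 * if any buffer fails that check.
 */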
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1647) static int check_before_balancing(struct tree_balance *tb)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1648) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1649) int retval = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1650)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1651) if (REISERFS_SB(tb->tb_sb)->cur_tb) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1652) reiserfs_panic(tb->tb_sb, "vs-12335", "suspect that schedule "
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1653) "occurred based on cur_tb not being null at "
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1654) "this point in the code. do_balance cannot properly "
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1655) "handle concurrent tree accesses on the same "
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1656) "mount point.");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1657) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1658)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1659) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1660) * double-check that the buffers we will modify are unlocked.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1661) * (fix_nodes should already have prepped all of these for us).
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1662) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1663) if (tb->lnum[0]) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1664) retval |= locked_or_not_in_tree(tb, tb->L[0], "L[0]");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1665) retval |= locked_or_not_in_tree(tb, tb->FL[0], "FL[0]");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1666) retval |= locked_or_not_in_tree(tb, tb->CFL[0], "CFL[0]");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1667) check_leaf(tb->L[0]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1668) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1669) if (tb->rnum[0]) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1670) retval |= locked_or_not_in_tree(tb, tb->R[0], "R[0]");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1671) retval |= locked_or_not_in_tree(tb, tb->FR[0], "FR[0]");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1672) retval |= locked_or_not_in_tree(tb, tb->CFR[0], "CFR[0]");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1673) check_leaf(tb->R[0]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1674) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1675) retval |= locked_or_not_in_tree(tb, PATH_PLAST_BUFFER(tb->tb_path),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1676) "S[0]");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1677) check_leaf(PATH_PLAST_BUFFER(tb->tb_path));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1678)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1679) return retval;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1680) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1681)
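/*
 * Debugging aid: after the leaf level has been balanced, verify that
 * the free space recorded in each modified leaf (L[0], R[0] and S[0])
 * matches MAX_CHILD_SIZE minus the dc_size stored for that leaf in its
 * parent's disk_child entry; panic if the accounting disagrees.
 */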
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1682) static void check_after_balance_leaf(struct tree_balance *tb)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1683) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1684) if (tb->lnum[0]) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1685) if (B_FREE_SPACE(tb->L[0]) !=
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1686) MAX_CHILD_SIZE(tb->L[0]) -
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1687) dc_size(B_N_CHILD
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1688) (tb->FL[0], get_left_neighbor_position(tb, 0)))) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1689) print_cur_tb("12221");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1690) reiserfs_panic(tb->tb_sb, "PAP-12355",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1691) "shift to left was incorrect");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1692) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1693) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1694) if (tb->rnum[0]) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1695) if (B_FREE_SPACE(tb->R[0]) !=
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1696) MAX_CHILD_SIZE(tb->R[0]) -
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1697) dc_size(B_N_CHILD
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1698) (tb->FR[0], get_right_neighbor_position(tb, 0)))) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1699) print_cur_tb("12222");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1700) reiserfs_panic(tb->tb_sb, "PAP-12360",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1701) "shift to right was incorrect");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1702) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1703) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1704) if (PATH_H_PBUFFER(tb->tb_path, 1) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1705) (B_FREE_SPACE(PATH_H_PBUFFER(tb->tb_path, 0)) !=
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1706) (MAX_CHILD_SIZE(PATH_H_PBUFFER(tb->tb_path, 0)) -
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1707) dc_size(B_N_CHILD(PATH_H_PBUFFER(tb->tb_path, 1),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1708) PATH_H_POSITION(tb->tb_path, 1)))))) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1709) int left = B_FREE_SPACE(PATH_H_PBUFFER(tb->tb_path, 0));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1710) int right = (MAX_CHILD_SIZE(PATH_H_PBUFFER(tb->tb_path, 0)) -
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1711) dc_size(B_N_CHILD(PATH_H_PBUFFER(tb->tb_path, 1),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1712) PATH_H_POSITION(tb->tb_path,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1713) 1))));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1714) print_cur_tb("12223");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1715) reiserfs_warning(tb->tb_sb, "reiserfs-12363",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1716) "B_FREE_SPACE (PATH_H_PBUFFER(tb->tb_path,0)) = %d; "
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1717) "MAX_CHILD_SIZE (%d) - dc_size( %y, %d ) [%d] = %d",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1718) left,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1719) MAX_CHILD_SIZE(PATH_H_PBUFFER(tb->tb_path, 0)),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1720) PATH_H_PBUFFER(tb->tb_path, 1),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1721) PATH_H_POSITION(tb->tb_path, 1),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1722) dc_size(B_N_CHILD
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1723) (PATH_H_PBUFFER(tb->tb_path, 1),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1724) PATH_H_POSITION(tb->tb_path, 1))),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1725) right);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1726) reiserfs_panic(tb->tb_sb, "PAP-12365", "S is incorrect");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1727) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1728) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1729)
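/* Debugging aid: run leaf consistency checks on L[0], R[0] and S[0]. */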
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1730) static void check_leaf_level(struct tree_balance *tb)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1731) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1732) check_leaf(tb->L[0]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1733) check_leaf(tb->R[0]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1734) check_leaf(PATH_PLAST_BUFFER(tb->tb_path));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1735) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1736)
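/*
 * Debugging aid: validate the internal nodes touched at every level
 * this balancing reached (insert_size[h] != 0).
 */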
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1737) static void check_internal_levels(struct tree_balance *tb)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1738) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1739) int h;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1740)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1741) /* check all internal nodes */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1742) for (h = 1; tb->insert_size[h]; h++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1743) check_internal_node(tb->tb_sb, PATH_H_PBUFFER(tb->tb_path, h),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1744) "BAD BUFFER ON PATH");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1745) if (tb->lnum[h])
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1746) check_internal_node(tb->tb_sb, tb->L[h], "BAD L");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1747) if (tb->rnum[h])
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1748) check_internal_node(tb->tb_sb, tb->R[h], "BAD R");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1749) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1750)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1751) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1752)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1753) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1754)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1755) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1756) * Now we have all of the buffers that must be used in balancing of
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1757) * the tree. We rely on the assumption that schedule() will not occur
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1758) * while do_balance works.  (Only interrupt handlers are acceptable.)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1759) * We balance the tree according to the analysis made before this,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1760) * using buffers already obtained. For SMP support it will someday be
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1761) * necessary to add ordered locking of tb.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1762) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1763)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1764) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1765) * Some interesting rules of balancing:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1766) * we delete a maximum of two nodes per level per balancing: we never
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1767) * delete R; when we delete two of the three nodes L, S and R, we move
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1768) * their contents into R.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1769) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1770) * we delete L only if we are deleting two nodes; if we delete only
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1771) * one node, we delete S.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1772) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1773) * if we shift leaves then we shift as much as we can: this is a
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1774) * deliberate policy of extremism in node packing which results in
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1775) * higher average utilization after repeated random balance operations
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1776) * at the cost of more memory copies and more balancing as a result of
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1777) * small insertions to full nodes.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1778) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1779) * if we shift internal nodes we try to evenly balance the node
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1780) * utilization, with consequently less balancing at the cost of lower
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1781) * utilization.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1782) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1783) * one could argue that the policy for directories in leaves should be
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1784) * that of internal nodes, but we will wait until another day to
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1785) * evaluate this.  It would be nice to someday measure and prove
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1786) * these assumptions as to what is optimal.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1787) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1788)
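/*
 * Prepare for balancing: under CONFIG_REISERFS_CHECK, verify the
 * buffers collected by fix_nodes() and record this tree_balance as the
 * balance currently in progress on this superblock.
 */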
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1789) static inline void do_balance_starts(struct tree_balance *tb)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1790) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1791) /* use print_cur_tb() to see initial state of struct tree_balance */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1792)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1793) /* store_print_tb (tb); */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1794)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1795) /* do not delete, just comment it out */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1796) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1797) print_tb(flag, PATH_LAST_POSITION(tb->tb_path),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1798) tb->tb_path->pos_in_item, tb, "check");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1799) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1800) RFALSE(check_before_balancing(tb), "PAP-12340: locked buffers in TB");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1801) #ifdef CONFIG_REISERFS_CHECK
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1802) REISERFS_SB(tb->tb_sb)->cur_tb = tb;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1803) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1804) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1805)
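/*
 * Finish balancing: run the post-balance consistency checks (under
 * CONFIG_REISERFS_CHECK), bump the do_balance statistics counter,
 * release the nodes held for balancing and free the blocks queued on
 * the thrown list.
 */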
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1806) static inline void do_balance_completed(struct tree_balance *tb)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1807) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1808)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1809) #ifdef CONFIG_REISERFS_CHECK
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1810) check_leaf_level(tb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1811) check_internal_levels(tb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1812) REISERFS_SB(tb->tb_sb)->cur_tb = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1813) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1814)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1815) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1816) * reiserfs_free_block is no longer schedule-safe.  So, we need to
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1817) * put the buffers we want freed on the thrown list during do_balance,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1818) * and then free them now.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1819) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1820)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1821) REISERFS_SB(tb->tb_sb)->s_do_balance++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1822)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1823) /* release all nodes held to perform the balancing */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1824) unfix_nodes(tb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1825)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1826) free_thrown(tb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1827) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1828)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1829) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1830) * do_balance - balance the tree
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1831) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1832) * @tb: tree_balance structure
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1833) * @ih: item header of inserted item
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1834) * @body: body of inserted item or bytes to paste
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1835) * @flag: 'i' - insert, 'd' - delete, 'c' - cut, 'p' - paste
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1836) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1837) * Cut means delete part of an item (includes removing an entry from a
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1838) * directory).
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1839) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1840) * Delete means delete whole item.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1841) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1842) * Insert means add a new item into the tree.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1843) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1844) * Paste means to append to the end of an existing file or to
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1845) * insert a directory entry.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1846) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1847) void do_balance(struct tree_balance *tb, struct item_head *ih,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1848) const char *body, int flag)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1849) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1850) int child_pos; /* position of a child node in its parent */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1851) int h; /* level of the tree being processed */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1852)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1853) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1854) * In our processing of one level we sometimes determine what
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1855) * must be inserted into the next higher level.  This insertion
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1856) * consists of one or two keys and their corresponding
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1857) * pointers.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1858) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1859) struct item_head insert_key[2];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1860)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1861) /* inserted node-ptrs for the next level */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1862) struct buffer_head *insert_ptr[2];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1863)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1864) tb->tb_mode = flag;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1865) tb->need_balance_dirty = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1866)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1867) if (FILESYSTEM_CHANGED_TB(tb)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1868) reiserfs_panic(tb->tb_sb, "clm-6000", "fs generation has "
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1869) "changed");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1870) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1871) /* if we have no real work to do */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1872) if (!tb->insert_size[0]) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1873) reiserfs_warning(tb->tb_sb, "PAP-12350",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1874) "insert_size == 0, mode == %c", flag);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1875) unfix_nodes(tb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1876) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1877) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1878)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1879) atomic_inc(&fs_generation(tb->tb_sb));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1880) do_balance_starts(tb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1881)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1882) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1883) * balance_leaf returns 0 except when combining L, R and S into
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1884) * one node.  See balance_internal() for an explanation of this
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1885) * line of code.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1886) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1887) child_pos = PATH_H_B_ITEM_ORDER(tb->tb_path, 0) +
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1888) balance_leaf(tb, ih, body, flag, insert_key, insert_ptr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1889)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1890) #ifdef CONFIG_REISERFS_CHECK
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1891) check_after_balance_leaf(tb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1892) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1893)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1894) /* Balance the internal levels of the tree. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1895) for (h = 1; h < MAX_HEIGHT && tb->insert_size[h]; h++)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1896) child_pos = balance_internal(tb, h, child_pos, insert_key,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1897) insert_ptr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1898)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1899) do_balance_completed(tb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1900) }