^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1) // SPDX-License-Identifier: GPL-2.0
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3) * linux/fs/hfs/btree.c
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5) * Copyright (C) 2001
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6) * Brad Boyer (flar@allandria.com)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7) * (C) 2003 Ardis Technologies <roman@ardistech.com>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9) * Handle opening/closing btree
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12) #include <linux/pagemap.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13) #include <linux/slab.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14) #include <linux/log2.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 16) #include "btree.h"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 17)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 18) /* Get a reference to a B*Tree and do some initial checks */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 19) struct hfs_btree *hfs_btree_open(struct super_block *sb, u32 id, btree_keycmp keycmp)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 20) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 21) struct hfs_btree *tree;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 22) struct hfs_btree_header_rec *head;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 23) struct address_space *mapping;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 24) struct page *page;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 25) unsigned int size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 26)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 27) tree = kzalloc(sizeof(*tree), GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 28) if (!tree)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 29) return NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 30)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 31) mutex_init(&tree->tree_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 32) spin_lock_init(&tree->hash_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 33) /* Set the correct compare function */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 34) tree->sb = sb;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 35) tree->cnid = id;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 36) tree->keycmp = keycmp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 37)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 38) tree->inode = iget_locked(sb, id);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 39) if (!tree->inode)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 40) goto free_tree;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 41) BUG_ON(!(tree->inode->i_state & I_NEW));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 42) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 43) struct hfs_mdb *mdb = HFS_SB(sb)->mdb;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 44) HFS_I(tree->inode)->flags = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 45) mutex_init(&HFS_I(tree->inode)->extents_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 46) switch (id) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 47) case HFS_EXT_CNID:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 48) hfs_inode_read_fork(tree->inode, mdb->drXTExtRec, mdb->drXTFlSize,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 49) mdb->drXTFlSize, be32_to_cpu(mdb->drXTClpSiz));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 50) if (HFS_I(tree->inode)->alloc_blocks >
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 51) HFS_I(tree->inode)->first_blocks) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 52) pr_err("invalid btree extent records\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 53) unlock_new_inode(tree->inode);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 54) goto free_inode;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 55) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 56)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 57) tree->inode->i_mapping->a_ops = &hfs_btree_aops;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 58) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 59) case HFS_CAT_CNID:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 60) hfs_inode_read_fork(tree->inode, mdb->drCTExtRec, mdb->drCTFlSize,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 61) mdb->drCTFlSize, be32_to_cpu(mdb->drCTClpSiz));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 62)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 63) if (!HFS_I(tree->inode)->first_blocks) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 64) pr_err("invalid btree extent records (0 size)\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 65) unlock_new_inode(tree->inode);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 66) goto free_inode;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 67) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 68)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 69) tree->inode->i_mapping->a_ops = &hfs_btree_aops;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 70) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 71) default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 72) BUG();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 73) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 74) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 75) unlock_new_inode(tree->inode);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 76)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 77) mapping = tree->inode->i_mapping;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 78) page = read_mapping_page(mapping, 0, NULL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 79) if (IS_ERR(page))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 80) goto free_inode;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 81)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 82) /* Load the header */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 83) head = (struct hfs_btree_header_rec *)(kmap(page) + sizeof(struct hfs_bnode_desc));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 84) tree->root = be32_to_cpu(head->root);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 85) tree->leaf_count = be32_to_cpu(head->leaf_count);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 86) tree->leaf_head = be32_to_cpu(head->leaf_head);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 87) tree->leaf_tail = be32_to_cpu(head->leaf_tail);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 88) tree->node_count = be32_to_cpu(head->node_count);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 89) tree->free_nodes = be32_to_cpu(head->free_nodes);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 90) tree->attributes = be32_to_cpu(head->attributes);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 91) tree->node_size = be16_to_cpu(head->node_size);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 92) tree->max_key_len = be16_to_cpu(head->max_key_len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 93) tree->depth = be16_to_cpu(head->depth);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 94)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 95) size = tree->node_size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 96) if (!is_power_of_2(size))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 97) goto fail_page;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 98) if (!tree->node_count)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 99) goto fail_page;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 100) switch (id) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 101) case HFS_EXT_CNID:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 102) if (tree->max_key_len != HFS_MAX_EXT_KEYLEN) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 103) pr_err("invalid extent max_key_len %d\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 104) tree->max_key_len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 105) goto fail_page;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 106) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 107) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 108) case HFS_CAT_CNID:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 109) if (tree->max_key_len != HFS_MAX_CAT_KEYLEN) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 110) pr_err("invalid catalog max_key_len %d\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 111) tree->max_key_len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 112) goto fail_page;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 113) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 114) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 115) default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 116) BUG();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 117) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 118)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 119) tree->node_size_shift = ffs(size) - 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 120) tree->pages_per_bnode = (tree->node_size + PAGE_SIZE - 1) >> PAGE_SHIFT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 121)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 122) kunmap(page);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 123) put_page(page);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 124) return tree;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 125)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 126) fail_page:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 127) put_page(page);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 128) free_inode:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 129) tree->inode->i_mapping->a_ops = &hfs_aops;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 130) iput(tree->inode);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 131) free_tree:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 132) kfree(tree);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 133) return NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 134) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 135)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 136) /* Release resources used by a btree */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 137) void hfs_btree_close(struct hfs_btree *tree)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 138) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 139) struct hfs_bnode *node;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 140) int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 141)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 142) if (!tree)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 143) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 144)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 145) for (i = 0; i < NODE_HASH_SIZE; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 146) while ((node = tree->node_hash[i])) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 147) tree->node_hash[i] = node->next_hash;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 148) if (atomic_read(&node->refcnt))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 149) pr_err("node %d:%d still has %d user(s)!\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 150) node->tree->cnid, node->this,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 151) atomic_read(&node->refcnt));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 152) hfs_bnode_free(node);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 153) tree->node_hash_cnt--;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 154) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 155) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 156) iput(tree->inode);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 157) kfree(tree);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 158) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 159)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 160) void hfs_btree_write(struct hfs_btree *tree)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 161) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 162) struct hfs_btree_header_rec *head;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 163) struct hfs_bnode *node;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 164) struct page *page;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 165)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 166) node = hfs_bnode_find(tree, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 167) if (IS_ERR(node))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 168) /* panic? */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 169) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 170) /* Load the header */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 171) page = node->page[0];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 172) head = (struct hfs_btree_header_rec *)(kmap(page) + sizeof(struct hfs_bnode_desc));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 173)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 174) head->root = cpu_to_be32(tree->root);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 175) head->leaf_count = cpu_to_be32(tree->leaf_count);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 176) head->leaf_head = cpu_to_be32(tree->leaf_head);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 177) head->leaf_tail = cpu_to_be32(tree->leaf_tail);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 178) head->node_count = cpu_to_be32(tree->node_count);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 179) head->free_nodes = cpu_to_be32(tree->free_nodes);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 180) head->attributes = cpu_to_be32(tree->attributes);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 181) head->depth = cpu_to_be16(tree->depth);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 182)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 183) kunmap(page);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 184) set_page_dirty(page);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 185) hfs_bnode_put(node);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 186) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 187)
/*
 * Turn the node at @idx into a new map (bitmap) node and link it after
 * @prev in the on-disk map-node chain.
 *
 * Returns the created bnode, or an ERR_PTR() from hfs_bnode_create().
 * NOTE(review): panics when tree->free_nodes is 0 — callers are
 * expected to have reserved space (see hfs_bmap_reserve()) first.
 */
static struct hfs_bnode *hfs_bmap_new_bmap(struct hfs_bnode *prev, u32 idx)
{
	struct hfs_btree *tree = prev->tree;
	struct hfs_bnode *node;
	struct hfs_bnode_desc desc;
	__be32 cnid;

	node = hfs_bnode_create(tree, idx);
	if (IS_ERR(node))
		return node;

	if (!tree->free_nodes)
		panic("FIXME!!!");
	tree->free_nodes--;
	/* Point the previous map node's on-disk "next" link at @idx. */
	prev->next = idx;
	cnid = cpu_to_be32(idx);
	hfs_bnode_write(prev, &cnid, offsetof(struct hfs_bnode_desc, next), 4);

	node->type = HFS_NODE_MAP;
	node->num_recs = 1;
	/* Start from an all-zero node, then write a fresh descriptor. */
	hfs_bnode_clear(node, 0, tree->node_size);
	desc.next = 0;
	desc.prev = 0;
	desc.type = HFS_NODE_MAP;
	desc.height = 0;
	desc.num_recs = cpu_to_be16(1);
	desc.reserved = 0;
	hfs_bnode_write(node, &desc, 0, sizeof(desc));
	/* Set bit 0 of the map: the map node itself counts as in use. */
	hfs_bnode_write_u16(node, 14, 0x8000);
	/*
	 * Record-offset table at the tail of the node: record 0 starts at
	 * offset 14 (right after the 14-byte descriptor); the entry at
	 * node_size - 4 gets node_size - 6 — presumably the free-space
	 * offset, leaving room for the two offset entries themselves.
	 * TODO(review): confirm against the HFS B-tree node layout.
	 */
	hfs_bnode_write_u16(node, tree->node_size - 2, 14);
	hfs_bnode_write_u16(node, tree->node_size - 4, tree->node_size - 6);

	return node;
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 222)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 223) /* Make sure @tree has enough space for the @rsvd_nodes */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 224) int hfs_bmap_reserve(struct hfs_btree *tree, int rsvd_nodes)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 225) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 226) struct inode *inode = tree->inode;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 227) u32 count;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 228) int res;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 229)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 230) while (tree->free_nodes < rsvd_nodes) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 231) res = hfs_extend_file(inode);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 232) if (res)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 233) return res;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 234) HFS_I(inode)->phys_size = inode->i_size =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 235) (loff_t)HFS_I(inode)->alloc_blocks *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 236) HFS_SB(tree->sb)->alloc_blksz;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 237) HFS_I(inode)->fs_blocks = inode->i_size >>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 238) tree->sb->s_blocksize_bits;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 239) inode_set_bytes(inode, inode->i_size);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 240) count = inode->i_size >> tree->node_size_shift;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 241) tree->free_nodes += count - tree->node_count;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 242) tree->node_count = count;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 243) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 244) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 245) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 246)
/*
 * Allocate one node from the tree's allocation bitmap and return it as
 * a freshly created bnode (or an ERR_PTR on failure).
 *
 * Walks the map records — record 2 of the header node first, then the
 * chain of dedicated map nodes — looking for the first clear bit, sets
 * it, and creates the corresponding node.
 */
struct hfs_bnode *hfs_bmap_alloc(struct hfs_btree *tree)
{
	struct hfs_bnode *node, *next_node;
	struct page **pagep;
	u32 nidx, idx;
	unsigned off;
	u16 off16;
	u16 len;
	u8 *data, byte, m;
	int i, res;

	/* Ensure at least one free node exists before scanning the map. */
	res = hfs_bmap_reserve(tree, 1);
	if (res)
		return ERR_PTR(res);

	nidx = 0;
	node = hfs_bnode_find(tree, nidx);
	if (IS_ERR(node))
		return node;
	/* Record 2 of the header node holds the first chunk of the map. */
	len = hfs_brec_lenoff(node, 2, &off16);
	off = off16;

	/* Translate the record offset into a page + in-page offset. */
	off += node->page_offset;
	pagep = node->page + (off >> PAGE_SHIFT);
	data = kmap(*pagep);
	off &= ~PAGE_MASK;
	idx = 0;

	for (;;) {
		/* Scan this map record byte by byte for a clear bit. */
		while (len) {
			byte = data[off];
			if (byte != 0xff) {
				for (m = 0x80, i = 0; i < 8; m >>= 1, i++) {
					if (!(byte & m)) {
						idx += i;
						/* Claim the bit, then hand out the node. */
						data[off] |= m;
						set_page_dirty(*pagep);
						kunmap(*pagep);
						tree->free_nodes--;
						mark_inode_dirty(tree->inode);
						hfs_bnode_put(node);
						return hfs_bnode_create(tree, idx);
					}
				}
			}
			/* Remap when the scan crosses a page boundary. */
			if (++off >= PAGE_SIZE) {
				kunmap(*pagep);
				data = kmap(*++pagep);
				off = 0;
			}
			idx += 8;
			len--;
		}
		kunmap(*pagep);
		/* Record exhausted; follow the chain to the next map node. */
		nidx = node->next;
		if (!nidx) {
			printk(KERN_DEBUG "create new bmap node...\n");
			next_node = hfs_bmap_new_bmap(node, idx);
		} else
			next_node = hfs_bnode_find(tree, nidx);
		hfs_bnode_put(node);
		if (IS_ERR(next_node))
			return next_node;
		node = next_node;

		/* Dedicated map nodes keep their bitmap in record 0. */
		len = hfs_brec_lenoff(node, 0, &off16);
		off = off16;
		off += node->page_offset;
		pagep = node->page + (off >> PAGE_SHIFT);
		data = kmap(*pagep);
		off &= ~PAGE_MASK;
	}
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 320)
/*
 * Clear the allocation-bitmap bit for @node, returning it to the pool
 * of free nodes.
 *
 * Walks the map-record chain (header node record 2, then dedicated map
 * nodes' record 0) to find the chunk covering the node's index, clears
 * its bit, and dirties the page. Corruption (missing map node, wrong
 * node type, already-free bit) is logged and the free is abandoned.
 */
void hfs_bmap_free(struct hfs_bnode *node)
{
	struct hfs_btree *tree;
	struct page *page;
	u16 off, len;
	u32 nidx;
	u8 *data, byte, m;

	hfs_dbg(BNODE_MOD, "btree_free_node: %u\n", node->this);
	tree = node->tree;
	nidx = node->this;
	/* Start at the header node; record 2 is the first map chunk. */
	node = hfs_bnode_find(tree, 0);
	if (IS_ERR(node))
		return;
	len = hfs_brec_lenoff(node, 2, &off);
	/* Skip whole map chunks until nidx falls inside the current one. */
	while (nidx >= len * 8) {
		u32 i;

		nidx -= len * 8;
		i = node->next;
		if (!i) {
			/* panic */;
			pr_crit("unable to free bnode %u. bmap not found!\n",
				node->this);
			hfs_bnode_put(node);
			return;
		}
		hfs_bnode_put(node);
		node = hfs_bnode_find(tree, i);
		if (IS_ERR(node))
			return;
		if (node->type != HFS_NODE_MAP) {
			/* panic */;
			pr_crit("invalid bmap found! (%u,%d)\n",
				node->this, node->type);
			hfs_bnode_put(node);
			return;
		}
		len = hfs_brec_lenoff(node, 0, &off);
	}
	/* Locate the byte holding this node's bit within the map record. */
	off += node->page_offset + nidx / 8;
	page = node->page[off >> PAGE_SHIFT];
	data = kmap(page);
	off &= ~PAGE_MASK;
	/* Bit 7 of each byte corresponds to the lowest node index. */
	m = 1 << (~nidx & 7);
	byte = data[off];
	if (!(byte & m)) {
		/* Double free: the bit is already clear. */
		pr_crit("trying to free free bnode %u(%d)\n",
			node->this, node->type);
		kunmap(page);
		hfs_bnode_put(node);
		return;
	}
	data[off] = byte & ~m;
	set_page_dirty(page);
	kunmap(page);
	hfs_bnode_put(node);
	tree->free_nodes++;
	mark_inode_dirty(tree->inode);
}