Orange Pi 5 kernel

Deprecated Linux kernel 5.10.110 for OrangePi 5/5B/5+ boards

// SPDX-License-Identifier: GPL-2.0
/*
 *  linux/fs/hfsplus/btree.c
 *
 * Copyright (C) 2001
 * Brad Boyer (flar@allandria.com)
 * (C) 2003 Ardis Technologies <roman@ardistech.com>
 *
 * Handle opening/closing btree
 */

#include <linux/slab.h>
#include <linux/pagemap.h>
#include <linux/log2.h>

#include "hfsplus_fs.h"
#include "hfsplus_raw.h"

/*
 * The initial clump size calculation was taken from
 * http://opensource.apple.com/tarballs/diskdev_cmds/
 */
#define CLUMP_ENTRIES	15

static short clumptbl[CLUMP_ENTRIES * 3] = {
/*
 *	    Volume	Attributes	 Catalog	 Extents
 *	     Size	Clump (MB)	Clump (MB)	Clump (MB)
 */
	/*   1GB */	  4,		  4,		 4,
	/*   2GB */	  6,		  6,		 4,
	/*   4GB */	  8,		  8,		 4,
	/*   8GB */	 11,		 11,		 5,
	/*
	 * For volumes 16GB and larger, we want to make sure that a full OS
	 * install won't require fragmentation of the Catalog or Attributes
	 * B-trees.  We do this by making the clump sizes sufficiently large,
	 * and by leaving a gap after the B-trees for them to grow into.
	 *
	 * For SnowLeopard 10A298, a FullNetInstall with all packages selected
	 * results in:
	 * Catalog B-tree Header
	 *	nodeSize:          8192
	 *	totalNodes:       31616
	 *	freeNodes:         1978
	 * (used = 231.55 MB)
	 * Attributes B-tree Header
	 *	nodeSize:          8192
	 *	totalNodes:       63232
	 *	freeNodes:          958
	 * (used = 486.52 MB)
	 *
	 * We also want Time Machine backup volumes to have a sufficiently
	 * large clump size to reduce fragmentation.
	 *
	 * The series of numbers for Catalog and Attribute form a geometric
	 * series. For Catalog (16GB to 512GB), each term is 8**(1/5) times
	 * the previous term.  For Attributes (16GB to 512GB), each term is
	 * 4**(1/5) times the previous term.  For 1TB to 16TB, each term is
	 * 2**(1/5) times the previous term.
	 */
	/*  16GB */	 64,		 32,		 5,
	/*  32GB */	 84,		 49,		 6,
	/*  64GB */	111,		 74,		 7,
	/* 128GB */	147,		111,		 8,
	/* 256GB */	194,		169,		 9,
	/* 512GB */	256,		256,		11,
	/*   1TB */	294,		294,		14,
	/*   2TB */	338,		338,		16,
	/*   4TB */	388,		388,		20,
	/*   8TB */	446,		446,		25,
	/*  16TB */	512,		512,		32
};

u32 hfsplus_calc_btree_clump_size(u32 block_size, u32 node_size,
					u64 sectors, int file_id)
{
	u32 mod = max(node_size, block_size);
	u32 clump_size;
	int column;
	int i;

	/* Figure out which column of the above table to use for this file. */
	switch (file_id) {
	case HFSPLUS_ATTR_CNID:
		column = 0;
		break;
	case HFSPLUS_CAT_CNID:
		column = 1;
		break;
	default:
		column = 2;
		break;
	}

	/*
	 * The default clump size is 0.8% of the volume size, and
	 * it must also be a multiple of the node and block size.
	 */
	if (sectors < 0x200000) {
		clump_size = sectors << 2;	/*  0.8 %  */
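		/*
		 * sectors << 2 is sectors * 4 bytes; with 512-byte
		 * sectors that is 4/512, i.e. roughly 0.8% of the
		 * volume.
		 */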
		if (clump_size < (8 * node_size))
			clump_size = 8 * node_size;
	} else {
		/* turn exponent into table index... */
		for (i = 0, sectors = sectors >> 22;
		     sectors && (i < CLUMP_ENTRIES - 1);
		     ++i, sectors = sectors >> 1) {
			/* empty body */
		}

		clump_size = clumptbl[column + i * 3] * 1024 * 1024;
	}
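
	/*
	 * For example, a 64GB volume has 2^27 512-byte sectors, so
	 * sectors >> 22 == 32 and the loop above stops with i == 6,
	 * the "64GB" row; the catalog tree (column 1) then gets
	 * clumptbl[1 + 6 * 3] == 74, i.e. a 74MB clump.
	 */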

	/*
	 * Round the clump size to a multiple of node and block size.
	 * NOTE: This rounds down.
	 */
	clump_size /= mod;
	clump_size *= mod;

	/*
	 * Rounding down could have rounded down to 0 if the block size was
	 * greater than the clump size.  If so, just use one block or node.
	 */
	if (clump_size == 0)
		clump_size = mod;

	return clump_size;
}
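
/*
 * In this kernel the main user is the attributes-file creation path
 * (fs/hfsplus/xattr.c), which computes roughly:
 *
 *	clump_size = hfsplus_calc_btree_clump_size(sb->s_blocksize,
 *						   node_size,
 *						   sbi->sect_count,
 *						   HFSPLUS_ATTR_CNID);
 */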

/* Get a reference to a B*Tree and do some initial checks */
struct hfs_btree *hfs_btree_open(struct super_block *sb, u32 id)
{
	struct hfs_btree *tree;
	struct hfs_btree_header_rec *head;
	struct address_space *mapping;
	struct inode *inode;
	struct page *page;
	unsigned int size;

	tree = kzalloc(sizeof(*tree), GFP_KERNEL);
	if (!tree)
		return NULL;

	mutex_init(&tree->tree_lock);
	spin_lock_init(&tree->hash_lock);
	tree->sb = sb;
	tree->cnid = id;
	inode = hfsplus_iget(sb, id);
	if (IS_ERR(inode))
		goto free_tree;
	tree->inode = inode;

	if (!HFSPLUS_I(tree->inode)->first_blocks) {
		pr_err("invalid btree extent records (0 size)\n");
		goto free_inode;
	}

	mapping = tree->inode->i_mapping;
	page = read_mapping_page(mapping, 0, NULL);
	if (IS_ERR(page))
		goto free_inode;

	/* Load the header */
	head = (struct hfs_btree_header_rec *)(kmap(page) +
		sizeof(struct hfs_bnode_desc));
	tree->root = be32_to_cpu(head->root);
	tree->leaf_count = be32_to_cpu(head->leaf_count);
	tree->leaf_head = be32_to_cpu(head->leaf_head);
	tree->leaf_tail = be32_to_cpu(head->leaf_tail);
	tree->node_count = be32_to_cpu(head->node_count);
	tree->free_nodes = be32_to_cpu(head->free_nodes);
	tree->attributes = be32_to_cpu(head->attributes);
	tree->node_size = be16_to_cpu(head->node_size);
	tree->max_key_len = be16_to_cpu(head->max_key_len);
	tree->depth = be16_to_cpu(head->depth);

	/* Verify the tree and set the correct compare function */
	switch (id) {
	case HFSPLUS_EXT_CNID:
		if (tree->max_key_len != HFSPLUS_EXT_KEYLEN - sizeof(u16)) {
			pr_err("invalid extent max_key_len %d\n",
				tree->max_key_len);
			goto fail_page;
		}
		if (tree->attributes & HFS_TREE_VARIDXKEYS) {
			pr_err("invalid extent btree flag\n");
			goto fail_page;
		}

		tree->keycmp = hfsplus_ext_cmp_key;
		break;
	case HFSPLUS_CAT_CNID:
		if (tree->max_key_len != HFSPLUS_CAT_KEYLEN - sizeof(u16)) {
			pr_err("invalid catalog max_key_len %d\n",
				tree->max_key_len);
			goto fail_page;
		}
		if (!(tree->attributes & HFS_TREE_VARIDXKEYS)) {
			pr_err("invalid catalog btree flag\n");
			goto fail_page;
		}

		if (test_bit(HFSPLUS_SB_HFSX, &HFSPLUS_SB(sb)->flags) &&
		    (head->key_type == HFSPLUS_KEY_BINARY)) {
			tree->keycmp = hfsplus_cat_bin_cmp_key;
		} else {
			tree->keycmp = hfsplus_cat_case_cmp_key;
			set_bit(HFSPLUS_SB_CASEFOLD, &HFSPLUS_SB(sb)->flags);
		}
		break;
	case HFSPLUS_ATTR_CNID:
		if (tree->max_key_len != HFSPLUS_ATTR_KEYLEN - sizeof(u16)) {
			pr_err("invalid attributes max_key_len %d\n",
				tree->max_key_len);
			goto fail_page;
		}
		tree->keycmp = hfsplus_attr_bin_cmp_key;
		break;
	default:
		pr_err("unknown B*Tree requested\n");
		goto fail_page;
	}

	if (!(tree->attributes & HFS_TREE_BIGKEYS)) {
		pr_err("invalid btree flag\n");
		goto fail_page;
	}

	size = tree->node_size;
	if (!is_power_of_2(size))
		goto fail_page;
	if (!tree->node_count)
		goto fail_page;

	tree->node_size_shift = ffs(size) - 1;

	tree->pages_per_bnode =
		(tree->node_size + PAGE_SIZE - 1) >>
		PAGE_SHIFT;
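	/*
	 * pages_per_bnode rounds up: an 8192-byte node on 4096-byte
	 * pages spans two pages.
	 */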

	kunmap(page);
	put_page(page);
	return tree;

 fail_page:
	kunmap(page);
	put_page(page);
 free_inode:
	tree->inode->i_mapping->a_ops = &hfsplus_aops;
	iput(tree->inode);
 free_tree:
	kfree(tree);
	return NULL;
}

/* Release resources used by a btree */
void hfs_btree_close(struct hfs_btree *tree)
{
	struct hfs_bnode *node;
	int i;

	if (!tree)
		return;

	for (i = 0; i < NODE_HASH_SIZE; i++) {
		while ((node = tree->node_hash[i])) {
			tree->node_hash[i] = node->next_hash;
			if (atomic_read(&node->refcnt))
				pr_crit("node %d:%d still has %d user(s)!\n",
					node->tree->cnid, node->this,
					atomic_read(&node->refcnt));
			hfs_bnode_free(node);
			tree->node_hash_cnt--;
		}
	}
	iput(tree->inode);
	kfree(tree);
}

int hfs_btree_write(struct hfs_btree *tree)
{
	struct hfs_btree_header_rec *head;
	struct hfs_bnode *node;
	struct page *page;

	node = hfs_bnode_find(tree, 0);
	if (IS_ERR(node))
		/* panic? */
		return -EIO;
	/* Load the header */
	page = node->page[0];
	head = (struct hfs_btree_header_rec *)(kmap(page) +
		sizeof(struct hfs_bnode_desc));

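	/*
	 * Only the fields that change at runtime are written back;
	 * node_size, max_key_len and key_type are fixed when the
	 * volume is created.
	 */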
	head->root = cpu_to_be32(tree->root);
	head->leaf_count = cpu_to_be32(tree->leaf_count);
	head->leaf_head = cpu_to_be32(tree->leaf_head);
	head->leaf_tail = cpu_to_be32(tree->leaf_tail);
	head->node_count = cpu_to_be32(tree->node_count);
	head->free_nodes = cpu_to_be32(tree->free_nodes);
	head->attributes = cpu_to_be32(tree->attributes);
	head->depth = cpu_to_be16(tree->depth);

	kunmap(page);
	set_page_dirty(page);
	hfs_bnode_put(node);
	return 0;
}

static struct hfs_bnode *hfs_bmap_new_bmap(struct hfs_bnode *prev, u32 idx)
{
	struct hfs_btree *tree = prev->tree;
	struct hfs_bnode *node;
	struct hfs_bnode_desc desc;
	__be32 cnid;

	node = hfs_bnode_create(tree, idx);
	if (IS_ERR(node))
		return node;

	tree->free_nodes--;
	prev->next = idx;
	cnid = cpu_to_be32(idx);
	hfs_bnode_write(prev, &cnid, offsetof(struct hfs_bnode_desc, next), 4);

	node->type = HFS_NODE_MAP;
	node->num_recs = 1;
	hfs_bnode_clear(node, 0, tree->node_size);
	desc.next = 0;
	desc.prev = 0;
	desc.type = HFS_NODE_MAP;
	desc.height = 0;
	desc.num_recs = cpu_to_be16(1);
	desc.reserved = 0;
	hfs_bnode_write(node, &desc, 0, sizeof(desc));
	hfs_bnode_write_u16(node, 14, 0x8000);
	hfs_bnode_write_u16(node, tree->node_size - 2, 14);
	hfs_bnode_write_u16(node, tree->node_size - 4, tree->node_size - 6);
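	/*
	 * The three writes above lay out the single map record: the two
	 * u16s at the end of the node form the record-offset table,
	 * placing record 0 at byte 14 (right after the node descriptor)
	 * and ending it at node_size - 6; the 0x8000 at offset 14 sets
	 * the record's first bit, marking this map node itself as
	 * allocated.
	 */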

	return node;
}

/* Make sure @tree has enough space for the @rsvd_nodes */
int hfs_bmap_reserve(struct hfs_btree *tree, int rsvd_nodes)
{
	struct inode *inode = tree->inode;
	struct hfsplus_inode_info *hip = HFSPLUS_I(inode);
	u32 count;
	int res;

	if (rsvd_nodes <= 0)
		return 0;

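	/*
	 * Grow the B-tree file until it can cover the request: each
	 * pass extends the file on disk, recomputes the node count from
	 * the new size, and credits the difference to free_nodes.
	 */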
	while (tree->free_nodes < rsvd_nodes) {
		res = hfsplus_file_extend(inode, hfs_bnode_need_zeroout(tree));
		if (res)
			return res;
		hip->phys_size = inode->i_size =
			(loff_t)hip->alloc_blocks <<
				HFSPLUS_SB(tree->sb)->alloc_blksz_shift;
		hip->fs_blocks =
			hip->alloc_blocks << HFSPLUS_SB(tree->sb)->fs_shift;
		inode_set_bytes(inode, inode->i_size);
		count = inode->i_size >> tree->node_size_shift;
		tree->free_nodes += count - tree->node_count;
		tree->node_count = count;
	}
	return 0;
}

struct hfs_bnode *hfs_bmap_alloc(struct hfs_btree *tree)
{
	struct hfs_bnode *node, *next_node;
	struct page **pagep;
	u32 nidx, idx;
	unsigned off;
	u16 off16;
	u16 len;
	u8 *data, byte, m;
	int i, res;

	res = hfs_bmap_reserve(tree, 1);
	if (res)
		return ERR_PTR(res);

	nidx = 0;
	node = hfs_bnode_find(tree, nidx);
	if (IS_ERR(node))
		return node;
	len = hfs_brec_lenoff(node, 2, &off16);
	off = off16;

	off += node->page_offset;
	pagep = node->page + (off >> PAGE_SHIFT);
	data = kmap(*pagep);
	off &= ~PAGE_MASK;
	idx = 0;

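	/*
	 * Walk the map records byte by byte, most significant bit
	 * first; the first clear bit names the lowest free node, which
	 * is claimed by setting its bit.
	 */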
	for (;;) {
		while (len) {
			byte = data[off];
			if (byte != 0xff) {
				for (m = 0x80, i = 0; i < 8; m >>= 1, i++) {
					if (!(byte & m)) {
						idx += i;
						data[off] |= m;
						set_page_dirty(*pagep);
						kunmap(*pagep);
						tree->free_nodes--;
						mark_inode_dirty(tree->inode);
						hfs_bnode_put(node);
						return hfs_bnode_create(tree,
							idx);
					}
				}
			}
			if (++off >= PAGE_SIZE) {
				kunmap(*pagep);
				data = kmap(*++pagep);
				off = 0;
			}
			idx += 8;
			len--;
		}
		kunmap(*pagep);
		nidx = node->next;
		if (!nidx) {
			hfs_dbg(BNODE_MOD, "create new bmap node\n");
			next_node = hfs_bmap_new_bmap(node, idx);
		} else {
			next_node = hfs_bnode_find(tree, nidx);
		}
		hfs_bnode_put(node);
		if (IS_ERR(next_node))
			return next_node;
		node = next_node;

		len = hfs_brec_lenoff(node, 0, &off16);
		off = off16;
		off += node->page_offset;
		pagep = node->page + (off >> PAGE_SHIFT);
		data = kmap(*pagep);
		off &= ~PAGE_MASK;
	}
}

void hfs_bmap_free(struct hfs_bnode *node)
{
	struct hfs_btree *tree;
	struct page *page;
	u16 off, len;
	u32 nidx;
	u8 *data, byte, m;

	hfs_dbg(BNODE_MOD, "btree_free_node: %u\n", node->this);
	BUG_ON(!node->this);
	tree = node->tree;
	nidx = node->this;
	node = hfs_bnode_find(tree, 0);
	if (IS_ERR(node))
		return;
	len = hfs_brec_lenoff(node, 2, &off);
	while (nidx >= len * 8) {
		u32 i;

		nidx -= len * 8;
		i = node->next;
		if (!i) {
			/* panic */;
			pr_crit("unable to free bnode %u. bmap not found!\n",
				node->this);
			hfs_bnode_put(node);
			return;
		}
		hfs_bnode_put(node);
		node = hfs_bnode_find(tree, i);
		if (IS_ERR(node))
			return;
		if (node->type != HFS_NODE_MAP) {
			/* panic */;
			pr_crit("invalid bmap found! (%u,%d)\n",
				node->this, node->type);
			hfs_bnode_put(node);
			return;
		}
		len = hfs_brec_lenoff(node, 0, &off);
	}
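	/*
	 * nidx is now the bit number within this map record: locate its
	 * byte, then clear the bit.  Bit 7 of a byte covers the lowest-
	 * numbered node, so the mask is 1 << (7 - (nidx & 7)), written
	 * below as 1 << (~nidx & 7).
	 */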
	off += node->page_offset + nidx / 8;
	page = node->page[off >> PAGE_SHIFT];
	data = kmap(page);
	off &= ~PAGE_MASK;
	m = 1 << (~nidx & 7);
	byte = data[off];
	if (!(byte & m)) {
		pr_crit("trying to free free bnode %u(%d)\n",
			node->this, node->type);
		kunmap(page);
		hfs_bnode_put(node);
		return;
	}
	data[off] = byte & ~m;
	set_page_dirty(page);
	kunmap(page);
	hfs_bnode_put(node);
	tree->free_nodes++;
	mark_inode_dirty(tree->inode);
}