Orange Pi 5 kernel

Deprecated Linux kernel 5.10.110 for OrangePi 5/5B/5+ boards

// SPDX-License-Identifier: GPL-2.0
/*
 *  linux/fs/hpfs/anode.c
 *
 *  Mikulas Patocka (mikulas@artax.karlin.mff.cuni.cz), 1998-1999
 *
 *  handling HPFS anode tree that contains file allocation info
 */

#include "hpfs_fn.h"

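/*
 * On-disk B+ tree geometry used throughout this file: a bplus_header is
 * 8 bytes, an internal entry (file_secno, down) is 8 bytes and an
 * external/leaf entry (file_secno, disk_secno, length) is 12 bytes.
 * An fnode's btree area holds up to 8 leaf or 12 internal entries, an
 * anode's up to 40 leaf or 60 internal entries; this is where the magic
 * numbers 8, 10, 11, 12, 40, 58, 59 and 60 used below come from.
 */
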
/* Find a sector in allocation tree */

secno hpfs_bplus_lookup(struct super_block *s, struct inode *inode,
		   struct bplus_header *btree, unsigned sec,
		   struct buffer_head *bh)
{
	anode_secno a = -1;
	struct anode *anode;
	int i;
	int c1, c2 = 0;
	go_down:
	if (hpfs_sb(s)->sb_chk) if (hpfs_stop_cycles(s, a, &c1, &c2, "hpfs_bplus_lookup")) return -1;
	if (bp_internal(btree)) {
		for (i = 0; i < btree->n_used_nodes; i++)
			if (le32_to_cpu(btree->u.internal[i].file_secno) > sec) {
				a = le32_to_cpu(btree->u.internal[i].down);
				brelse(bh);
				if (!(anode = hpfs_map_anode(s, a, &bh))) return -1;
				btree = &anode->btree;
				goto go_down;
			}
		hpfs_error(s, "sector %08x not found in internal anode %08x", sec, a);
		brelse(bh);
		return -1;
	}
	for (i = 0; i < btree->n_used_nodes; i++)
		if (le32_to_cpu(btree->u.external[i].file_secno) <= sec &&
		    le32_to_cpu(btree->u.external[i].file_secno) + le32_to_cpu(btree->u.external[i].length) > sec) {
			a = le32_to_cpu(btree->u.external[i].disk_secno) + sec - le32_to_cpu(btree->u.external[i].file_secno);
			if (hpfs_sb(s)->sb_chk) if (hpfs_chk_sectors(s, a, 1, "data")) {
				brelse(bh);
				return -1;
			}
			if (inode) {
				struct hpfs_inode_info *hpfs_inode = hpfs_i(inode);
				hpfs_inode->i_file_sec = le32_to_cpu(btree->u.external[i].file_secno);
				hpfs_inode->i_disk_sec = le32_to_cpu(btree->u.external[i].disk_secno);
				hpfs_inode->i_n_secs = le32_to_cpu(btree->u.external[i].length);
			}
			brelse(bh);
			return a;
		}
	hpfs_error(s, "sector %08x not found in external anode %08x", sec, a);
	brelse(bh);
	return -1;
}
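/*
 * Note that hpfs_bplus_lookup() consumes bh: every path above releases it
 * with brelse(), so the caller maps the fnode/anode, passes the buffer in
 * and must not release it again.  Illustrative use only (the actual caller,
 * e.g. hpfs_bmap() in file.c, may differ in detail):
 *
 *	if (!(fnode = hpfs_map_fnode(s, ino, &bh))) return 0;
 *	disk_sec = hpfs_bplus_lookup(s, inode, &fnode->btree, file_sec, bh);
 */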

/* Add a sector to tree */

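/*
 * Append one sector at file position fsecno to the tree rooted at fnode or
 * anode 'node' (fnod selects which).  If the disk sector right after the
 * last extent happens to be free, that extent is simply extended; otherwise
 * a new sector is allocated and a new leaf entry is added, splitting anodes
 * and growing the tree upwards as needed.  Returns the disk sector number
 * of the newly added sector, or -1 on error.
 */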
secno hpfs_add_sector_to_btree(struct super_block *s, secno node, int fnod, unsigned fsecno)
{
	struct bplus_header *btree;
	struct anode *anode = NULL, *ranode = NULL;
	struct fnode *fnode;
	anode_secno a, na = -1, ra, up = -1;
	secno se;
	struct buffer_head *bh, *bh1, *bh2;
	int n;
	unsigned fs;
	int c1, c2 = 0;
	if (fnod) {
		if (!(fnode = hpfs_map_fnode(s, node, &bh))) return -1;
		btree = &fnode->btree;
	} else {
		if (!(anode = hpfs_map_anode(s, node, &bh))) return -1;
		btree = &anode->btree;
	}
	a = node;
	go_down:
	if ((n = btree->n_used_nodes - 1) < -!!fnod) {
		hpfs_error(s, "anode %08x has no entries", a);
		brelse(bh);
		return -1;
	}
	if (bp_internal(btree)) {
		a = le32_to_cpu(btree->u.internal[n].down);
		btree->u.internal[n].file_secno = cpu_to_le32(-1);
		mark_buffer_dirty(bh);
		brelse(bh);
		if (hpfs_sb(s)->sb_chk)
			if (hpfs_stop_cycles(s, a, &c1, &c2, "hpfs_add_sector_to_btree #1")) return -1;
		if (!(anode = hpfs_map_anode(s, a, &bh))) return -1;
		btree = &anode->btree;
		goto go_down;
	}
	if (n >= 0) {
		if (le32_to_cpu(btree->u.external[n].file_secno) + le32_to_cpu(btree->u.external[n].length) != fsecno) {
			hpfs_error(s, "allocated size %08x, trying to add sector %08x, %cnode %08x",
				le32_to_cpu(btree->u.external[n].file_secno) + le32_to_cpu(btree->u.external[n].length), fsecno,
				fnod?'f':'a', node);
			brelse(bh);
			return -1;
		}
		if (hpfs_alloc_if_possible(s, se = le32_to_cpu(btree->u.external[n].disk_secno) + le32_to_cpu(btree->u.external[n].length))) {
			le32_add_cpu(&btree->u.external[n].length, 1);
			mark_buffer_dirty(bh);
			brelse(bh);
			return se;
		}
	} else {
		if (fsecno) {
			hpfs_error(s, "empty file %08x, trying to add sector %08x", node, fsecno);
			brelse(bh);
			return -1;
		}
		se = !fnod ? node : (node + 16384) & ~16383;
	}
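	/*
	 * Allocate the new sector near 'se', asking the allocator to keep
	 * some sectors ahead of it free; the forward hint grows with the
	 * current file size but is clamped between ALLOC_FWD_MIN and
	 * ALLOC_FWD_MAX.
	 */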
	if (!(se = hpfs_alloc_sector(s, se, 1, fsecno*ALLOC_M>ALLOC_FWD_MAX ? ALLOC_FWD_MAX : fsecno*ALLOC_M<ALLOC_FWD_MIN ? ALLOC_FWD_MIN : fsecno*ALLOC_M))) {
		brelse(bh);
		return -1;
	}
	fs = n < 0 ? 0 : le32_to_cpu(btree->u.external[n].file_secno) + le32_to_cpu(btree->u.external[n].length);
	if (!btree->n_free_nodes) {
		up = a != node ? le32_to_cpu(anode->up) : -1;
		if (!(anode = hpfs_alloc_anode(s, a, &na, &bh1))) {
			brelse(bh);
			hpfs_free_sectors(s, se, 1);
			return -1;
		}
		if (a == node && fnod) {
			anode->up = cpu_to_le32(node);
			anode->btree.flags |= BP_fnode_parent;
			anode->btree.n_used_nodes = btree->n_used_nodes;
			anode->btree.first_free = btree->first_free;
			anode->btree.n_free_nodes = 40 - anode->btree.n_used_nodes;
			memcpy(&anode->u, &btree->u, btree->n_used_nodes * 12);
			btree->flags |= BP_internal;
			btree->n_free_nodes = 11;
			btree->n_used_nodes = 1;
			btree->first_free = cpu_to_le16((char *)&(btree->u.internal[1]) - (char *)btree);
			btree->u.internal[0].file_secno = cpu_to_le32(-1);
			btree->u.internal[0].down = cpu_to_le32(na);
			mark_buffer_dirty(bh);
		} else if (!(ranode = hpfs_alloc_anode(s, /*a*/0, &ra, &bh2))) {
			brelse(bh);
			brelse(bh1);
			hpfs_free_sectors(s, se, 1);
			hpfs_free_sectors(s, na, 1);
			return -1;
		}
		brelse(bh);
		bh = bh1;
		btree = &anode->btree;
	}
	btree->n_free_nodes--; n = btree->n_used_nodes++;
	le16_add_cpu(&btree->first_free, 12);
	btree->u.external[n].disk_secno = cpu_to_le32(se);
	btree->u.external[n].file_secno = cpu_to_le32(fs);
	btree->u.external[n].length = cpu_to_le32(1);
	mark_buffer_dirty(bh);
	brelse(bh);
	if ((a == node && fnod) || na == -1) return se;
	c2 = 0;
	while (up != (anode_secno)-1) {
		struct anode *new_anode;
		if (hpfs_sb(s)->sb_chk)
			if (hpfs_stop_cycles(s, up, &c1, &c2, "hpfs_add_sector_to_btree #2")) return -1;
		if (up != node || !fnod) {
			if (!(anode = hpfs_map_anode(s, up, &bh))) return -1;
			btree = &anode->btree;
		} else {
			if (!(fnode = hpfs_map_fnode(s, up, &bh))) return -1;
			btree = &fnode->btree;
		}
		if (btree->n_free_nodes) {
			btree->n_free_nodes--; n = btree->n_used_nodes++;
			le16_add_cpu(&btree->first_free, 8);
			btree->u.internal[n].file_secno = cpu_to_le32(-1);
			btree->u.internal[n].down = cpu_to_le32(na);
			btree->u.internal[n-1].file_secno = cpu_to_le32(fs);
			mark_buffer_dirty(bh);
			brelse(bh);
			brelse(bh2);
			hpfs_free_sectors(s, ra, 1);
			if ((anode = hpfs_map_anode(s, na, &bh))) {
				anode->up = cpu_to_le32(up);
				if (up == node && fnod)
					anode->btree.flags |= BP_fnode_parent;
				else
					anode->btree.flags &= ~BP_fnode_parent;
				mark_buffer_dirty(bh);
				brelse(bh);
			}
			return se;
		}
		up = up != node ? le32_to_cpu(anode->up) : -1;
		btree->u.internal[btree->n_used_nodes - 1].file_secno = cpu_to_le32(/*fs*/-1);
		mark_buffer_dirty(bh);
		brelse(bh);
		a = na;
		if ((new_anode = hpfs_alloc_anode(s, a, &na, &bh))) {
			anode = new_anode;
			/*anode->up = cpu_to_le32(up != -1 ? up : ra);*/
			anode->btree.flags |= BP_internal;
			anode->btree.n_used_nodes = 1;
			anode->btree.n_free_nodes = 59;
			anode->btree.first_free = cpu_to_le16(16);
			anode->btree.u.internal[0].down = cpu_to_le32(a);
			anode->btree.u.internal[0].file_secno = cpu_to_le32(-1);
			mark_buffer_dirty(bh);
			brelse(bh);
			if ((anode = hpfs_map_anode(s, a, &bh))) {
				anode->up = cpu_to_le32(na);
				mark_buffer_dirty(bh);
				brelse(bh);
			}
		} else na = a;
	}
	if ((anode = hpfs_map_anode(s, na, &bh))) {
		anode->up = cpu_to_le32(node);
		if (fnod)
			anode->btree.flags |= BP_fnode_parent;
		mark_buffer_dirty(bh);
		brelse(bh);
	}
	if (!fnod) {
		if (!(anode = hpfs_map_anode(s, node, &bh))) {
			brelse(bh2);
			return -1;
		}
		btree = &anode->btree;
	} else {
		if (!(fnode = hpfs_map_fnode(s, node, &bh))) {
			brelse(bh2);
			return -1;
		}
		btree = &fnode->btree;
	}
	ranode->up = cpu_to_le32(node);
	memcpy(&ranode->btree, btree, le16_to_cpu(btree->first_free));
	if (fnod)
		ranode->btree.flags |= BP_fnode_parent;
	ranode->btree.n_free_nodes = (bp_internal(&ranode->btree) ? 60 : 40) - ranode->btree.n_used_nodes;
	if (bp_internal(&ranode->btree)) for (n = 0; n < ranode->btree.n_used_nodes; n++) {
		struct anode *unode;
		if ((unode = hpfs_map_anode(s, le32_to_cpu(ranode->u.internal[n].down), &bh1))) {
			unode->up = cpu_to_le32(ra);
			unode->btree.flags &= ~BP_fnode_parent;
			mark_buffer_dirty(bh1);
			brelse(bh1);
		}
	}
	btree->flags |= BP_internal;
	btree->n_free_nodes = fnod ? 10 : 58;
	btree->n_used_nodes = 2;
	btree->first_free = cpu_to_le16((char *)&btree->u.internal[2] - (char *)btree);
	btree->u.internal[0].file_secno = cpu_to_le32(fs);
	btree->u.internal[0].down = cpu_to_le32(ra);
	btree->u.internal[1].file_secno = cpu_to_le32(-1);
	btree->u.internal[1].down = cpu_to_le32(na);
	mark_buffer_dirty(bh);
	brelse(bh);
	mark_buffer_dirty(bh2);
	brelse(bh2);
	return se;
}

/*
 * Remove allocation tree. Recursion would look much nicer but
 * I want to avoid it because it can cause stack overflow.
 */
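/*
 * Instead, the tree is walked iteratively: 'level' tracks the current depth
 * and each anode's 'up' pointer is followed to get back to the parent, so
 * no per-level state has to be kept on the kernel stack.
 */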

void hpfs_remove_btree(struct super_block *s, struct bplus_header *btree)
{
	struct bplus_header *btree1 = btree;
	struct anode *anode = NULL;
	anode_secno ano = 0, oano;
	struct buffer_head *bh;
	int level = 0;
	int pos = 0;
	int i;
	int c1, c2 = 0;
	int d1, d2;
	go_down:
	d2 = 0;
	while (bp_internal(btree1)) {
		ano = le32_to_cpu(btree1->u.internal[pos].down);
		if (level) brelse(bh);
		if (hpfs_sb(s)->sb_chk)
			if (hpfs_stop_cycles(s, ano, &d1, &d2, "hpfs_remove_btree #1"))
				return;
		if (!(anode = hpfs_map_anode(s, ano, &bh))) return;
		btree1 = &anode->btree;
		level++;
		pos = 0;
	}
	for (i = 0; i < btree1->n_used_nodes; i++)
		hpfs_free_sectors(s, le32_to_cpu(btree1->u.external[i].disk_secno), le32_to_cpu(btree1->u.external[i].length));
	go_up:
	if (!level) return;
	brelse(bh);
	if (hpfs_sb(s)->sb_chk)
		if (hpfs_stop_cycles(s, ano, &c1, &c2, "hpfs_remove_btree #2")) return;
	hpfs_free_sectors(s, ano, 1);
	oano = ano;
	ano = le32_to_cpu(anode->up);
	if (--level) {
		if (!(anode = hpfs_map_anode(s, ano, &bh))) return;
		btree1 = &anode->btree;
	} else btree1 = btree;
	for (i = 0; i < btree1->n_used_nodes; i++) {
		if (le32_to_cpu(btree1->u.internal[i].down) == oano) {
			if ((pos = i + 1) < btree1->n_used_nodes)
				goto go_down;
			else
				goto go_up;
		}
	}
	hpfs_error(s,
		   "reference to anode %08x not found in anode %08x "
		   "(probably bad up pointer)",
		   oano, level ? ano : -1);
	if (level)
		brelse(bh);
}

/* Just a wrapper around hpfs_bplus_lookup .. used for reading eas */

static secno anode_lookup(struct super_block *s, anode_secno a, unsigned sec)
{
	struct anode *anode;
	struct buffer_head *bh;
	if (!(anode = hpfs_map_anode(s, a, &bh))) return -1;
	return hpfs_bplus_lookup(s, NULL, &anode->btree, sec, bh);
}

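/*
 * EA data is addressed in bytes: 'pos' and 'len' are byte offsets into the
 * extended-attribute area.  If 'ano' is set, 'a' is the root anode of a B+
 * tree describing the EA sectors; otherwise the EA occupies consecutive
 * sectors starting at 'a'.  Each loop iteration below handles at most one
 * 512-byte sector, hence the >> 9 and & 0x1ff arithmetic.
 */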
int hpfs_ea_read(struct super_block *s, secno a, int ano, unsigned pos,
	    unsigned len, char *buf)
{
	struct buffer_head *bh;
	char *data;
	secno sec;
	unsigned l;
	while (len) {
		if (ano) {
			if ((sec = anode_lookup(s, a, pos >> 9)) == -1)
				return -1;
		} else sec = a + (pos >> 9);
		if (hpfs_sb(s)->sb_chk) if (hpfs_chk_sectors(s, sec, 1, "ea #1")) return -1;
		if (!(data = hpfs_map_sector(s, sec, &bh, (len - 1) >> 9)))
			return -1;
		l = 0x200 - (pos & 0x1ff); if (l > len) l = len;
		memcpy(buf, data + (pos & 0x1ff), l);
		brelse(bh);
		buf += l; pos += l; len -= l;
	}
	return 0;
}

int hpfs_ea_write(struct super_block *s, secno a, int ano, unsigned pos,
	     unsigned len, const char *buf)
{
	struct buffer_head *bh;
	char *data;
	secno sec;
	unsigned l;
	while (len) {
		if (ano) {
			if ((sec = anode_lookup(s, a, pos >> 9)) == -1)
				return -1;
		} else sec = a + (pos >> 9);
		if (hpfs_sb(s)->sb_chk) if (hpfs_chk_sectors(s, sec, 1, "ea #2")) return -1;
		if (!(data = hpfs_map_sector(s, sec, &bh, (len - 1) >> 9)))
			return -1;
		l = 0x200 - (pos & 0x1ff); if (l > len) l = len;
		memcpy(data + (pos & 0x1ff), buf, l);
		mark_buffer_dirty(bh);
		brelse(bh);
		buf += l; pos += l; len -= l;
	}
	return 0;
}

void hpfs_ea_remove(struct super_block *s, secno a, int ano, unsigned len)
{
	struct anode *anode;
	struct buffer_head *bh;
	if (ano) {
		if (!(anode = hpfs_map_anode(s, a, &bh))) return;
		hpfs_remove_btree(s, &anode->btree);
		brelse(bh);
		hpfs_free_sectors(s, a, 1);
	} else hpfs_free_sectors(s, a, (len + 511) >> 9);
}

/* Truncate allocation tree. Doesn't join anodes - I hope it doesn't matter */
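/*
 * 'secs' is the new length in sectors.  Extents and whole subtrees that lie
 * entirely beyond it are freed, and the extent straddling the boundary is
 * shortened; anodes left only partially filled are not merged ("joined")
 * with their siblings, which is what the note above refers to.
 */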

void hpfs_truncate_btree(struct super_block *s, secno f, int fno, unsigned secs)
{
	struct fnode *fnode;
	struct anode *anode;
	struct buffer_head *bh;
	struct bplus_header *btree;
	anode_secno node = f;
	int i, j, nodes;
	int c1, c2 = 0;
	if (fno) {
		if (!(fnode = hpfs_map_fnode(s, f, &bh))) return;
		btree = &fnode->btree;
	} else {
		if (!(anode = hpfs_map_anode(s, f, &bh))) return;
		btree = &anode->btree;
	}
	if (!secs) {
		hpfs_remove_btree(s, btree);
		if (fno) {
			btree->n_free_nodes = 8;
			btree->n_used_nodes = 0;
			btree->first_free = cpu_to_le16(8);
			btree->flags &= ~BP_internal;
			mark_buffer_dirty(bh);
		} else hpfs_free_sectors(s, f, 1);
		brelse(bh);
		return;
	}
	while (bp_internal(btree)) {
		nodes = btree->n_used_nodes + btree->n_free_nodes;
		for (i = 0; i < btree->n_used_nodes; i++)
			if (le32_to_cpu(btree->u.internal[i].file_secno) >= secs) goto f;
		brelse(bh);
		hpfs_error(s, "internal btree %08x doesn't end with -1", node);
		return;
		f:
		for (j = i + 1; j < btree->n_used_nodes; j++)
			hpfs_ea_remove(s, le32_to_cpu(btree->u.internal[j].down), 1, 0);
		btree->n_used_nodes = i + 1;
		btree->n_free_nodes = nodes - btree->n_used_nodes;
		btree->first_free = cpu_to_le16(8 + 8 * btree->n_used_nodes);
		mark_buffer_dirty(bh);
		if (btree->u.internal[i].file_secno == cpu_to_le32(secs)) {
			brelse(bh);
			return;
		}
		node = le32_to_cpu(btree->u.internal[i].down);
		brelse(bh);
		if (hpfs_sb(s)->sb_chk)
			if (hpfs_stop_cycles(s, node, &c1, &c2, "hpfs_truncate_btree"))
				return;
		if (!(anode = hpfs_map_anode(s, node, &bh))) return;
		btree = &anode->btree;
	}
	nodes = btree->n_used_nodes + btree->n_free_nodes;
	for (i = 0; i < btree->n_used_nodes; i++)
		if (le32_to_cpu(btree->u.external[i].file_secno) + le32_to_cpu(btree->u.external[i].length) >= secs) goto ff;
	brelse(bh);
	return;
	ff:
	if (secs <= le32_to_cpu(btree->u.external[i].file_secno)) {
		hpfs_error(s, "there is an allocation error in file %08x, sector %08x", f, secs);
		if (i) i--;
	}
	else if (le32_to_cpu(btree->u.external[i].file_secno) + le32_to_cpu(btree->u.external[i].length) > secs) {
		hpfs_free_sectors(s, le32_to_cpu(btree->u.external[i].disk_secno) + secs -
			le32_to_cpu(btree->u.external[i].file_secno), le32_to_cpu(btree->u.external[i].length)
			- secs + le32_to_cpu(btree->u.external[i].file_secno)); /* I hope gcc optimizes this :-) */
		btree->u.external[i].length = cpu_to_le32(secs - le32_to_cpu(btree->u.external[i].file_secno));
	}
	for (j = i + 1; j < btree->n_used_nodes; j++)
		hpfs_free_sectors(s, le32_to_cpu(btree->u.external[j].disk_secno), le32_to_cpu(btree->u.external[j].length));
	btree->n_used_nodes = i + 1;
	btree->n_free_nodes = nodes - btree->n_used_nodes;
	btree->first_free = cpu_to_le16(8 + 12 * btree->n_used_nodes);
	mark_buffer_dirty(bh);
	brelse(bh);
}

/* Remove file or directory and its eas - note that directory must
   be empty when this is called. */

void hpfs_remove_fnode(struct super_block *s, fnode_secno fno)
{
	struct buffer_head *bh;
	struct fnode *fnode;
	struct extended_attribute *ea;
	struct extended_attribute *ea_end;
	if (!(fnode = hpfs_map_fnode(s, fno, &bh))) return;
	if (!fnode_is_dir(fnode)) hpfs_remove_btree(s, &fnode->btree);
	else hpfs_remove_dtree(s, le32_to_cpu(fnode->u.external[0].disk_secno));
	ea_end = fnode_end_ea(fnode);
	for (ea = fnode_ea(fnode); ea < ea_end; ea = next_ea(ea))
		if (ea_indirect(ea))
			hpfs_ea_remove(s, ea_sec(ea), ea_in_anode(ea), ea_len(ea));
	hpfs_ea_ext_remove(s, le32_to_cpu(fnode->ea_secno), fnode_in_anode(fnode), le32_to_cpu(fnode->ea_size_l));
	brelse(bh);
	hpfs_free_sectors(s, fno, 1);
}