Orange Pi 5 kernel

Deprecated Linux kernel 5.10.110 for OrangePi 5/5B/5+ boards

// SPDX-License-Identifier: GPL-2.0
/*
 *  linux/fs/hfsplus/extents.c
 *
 * Copyright (C) 2001
 * Brad Boyer (flar@allandria.com)
 * (C) 2003 Ardis Technologies <roman@ardistech.com>
 *
 * Handling of Extents both in catalog and extents overflow trees
 */

#include <linux/errno.h>
#include <linux/fs.h>
#include <linux/pagemap.h>

#include "hfsplus_fs.h"
#include "hfsplus_raw.h"

/* Compare two extent keys; returns 0 if equal, negative/positive on difference */
int hfsplus_ext_cmp_key(const hfsplus_btree_key *k1,
			const hfsplus_btree_key *k2)
{
	__be32 k1id, k2id;
	__be32 k1s, k2s;

	k1id = k1->ext.cnid;
	k2id = k2->ext.cnid;
	if (k1id != k2id)
		return be32_to_cpu(k1id) < be32_to_cpu(k2id) ? -1 : 1;

	if (k1->ext.fork_type != k2->ext.fork_type)
		return k1->ext.fork_type < k2->ext.fork_type ? -1 : 1;

	k1s = k1->ext.start_block;
	k2s = k2->ext.start_block;
	if (k1s == k2s)
		return 0;
	return be32_to_cpu(k1s) < be32_to_cpu(k2s) ? -1 : 1;
}

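/*
 * Build an extents-tree search key for the given file ID (cnid), fork type
 * and starting allocation block; on-disk fields are stored big-endian.
 */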
static void hfsplus_ext_build_key(hfsplus_btree_key *key, u32 cnid,
				  u32 block, u8 type)
{
	key->key_len = cpu_to_be16(HFSPLUS_EXT_KEYLEN - 2);
	key->ext.cnid = cpu_to_be32(cnid);
	key->ext.start_block = cpu_to_be32(block);
	key->ext.fork_type = type;
	key->ext.pad = 0;
}

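/*
 * An hfsplus_extent_rec holds eight (start_block, block_count) descriptors.
 * Walk them to translate a record-relative block offset into an absolute
 * allocation block; returns 0 if the offset lies beyond the record.
 */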
static u32 hfsplus_ext_find_block(struct hfsplus_extent *ext, u32 off)
{
	int i;
	u32 count;

	for (i = 0; i < 8; ext++, i++) {
		count = be32_to_cpu(ext->block_count);
		if (off < count)
			return be32_to_cpu(ext->start_block) + off;
		off -= count;
	}
	/* panic? */
	return 0;
}

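/* Total number of allocation blocks described by one extent record. */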
static int hfsplus_ext_block_count(struct hfsplus_extent *ext)
{
	int i;
	u32 count = 0;

	for (i = 0; i < 8; ext++, i++)
		count += be32_to_cpu(ext->block_count);
	return count;
}

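/*
 * Allocation block just past the last in-use descriptor of an extent
 * record; used as the goal when allocating further blocks.
 */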
static u32 hfsplus_ext_lastblock(struct hfsplus_extent *ext)
{
	int i;

	ext += 7;
	for (i = 0; i < 7; ext--, i++)
		if (ext->block_count)
			break;
	return be32_to_cpu(ext->start_block) + be32_to_cpu(ext->block_count);
}

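/*
 * Write the cached extent record back into the extents tree: a record
 * flagged HFSPLUS_EXT_NEW is inserted, otherwise the existing record is
 * overwritten in place.  Caller must hold hip->extents_lock.
 */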
static int __hfsplus_ext_write_extent(struct inode *inode,
		struct hfs_find_data *fd)
{
	struct hfsplus_inode_info *hip = HFSPLUS_I(inode);
	int res;

	WARN_ON(!mutex_is_locked(&hip->extents_lock));

	hfsplus_ext_build_key(fd->search_key, inode->i_ino, hip->cached_start,
			      HFSPLUS_IS_RSRC(inode) ?
				HFSPLUS_TYPE_RSRC : HFSPLUS_TYPE_DATA);

	res = hfs_brec_find(fd, hfs_find_rec_by_key);
	if (hip->extent_state & HFSPLUS_EXT_NEW) {
		if (res != -ENOENT)
			return res;
		/* Fail early and avoid ENOSPC during the btree operation */
		res = hfs_bmap_reserve(fd->tree, fd->tree->depth + 1);
		if (res)
			return res;
		hfs_brec_insert(fd, hip->cached_extents,
				sizeof(hfsplus_extent_rec));
		hip->extent_state &= ~(HFSPLUS_EXT_DIRTY | HFSPLUS_EXT_NEW);
	} else {
		if (res)
			return res;
		hfs_bnode_write(fd->bnode, hip->cached_extents,
				fd->entryoffset, fd->entrylength);
		hip->extent_state &= ~HFSPLUS_EXT_DIRTY;
	}

	/*
	 * We can't just use hfsplus_mark_inode_dirty here, because we
	 * also get called from hfsplus_write_inode, which should not
	 * redirty the inode.  Instead the callers have to be careful
	 * to explicitly mark the inode dirty, too.
	 */
	set_bit(HFSPLUS_I_EXT_DIRTY, &hip->flags);

	return 0;
}

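/* Flush the cached extent record if dirty; extents_lock held by caller. */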
static int hfsplus_ext_write_extent_locked(struct inode *inode)
{
	int res = 0;

	if (HFSPLUS_I(inode)->extent_state & HFSPLUS_EXT_DIRTY) {
		struct hfs_find_data fd;

		res = hfs_find_init(HFSPLUS_SB(inode->i_sb)->ext_tree, &fd);
		if (res)
			return res;
		res = __hfsplus_ext_write_extent(inode, &fd);
		hfs_find_exit(&fd);
	}
	return res;
}

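/* Take extents_lock and flush the inode's cached extent record. */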
int hfsplus_ext_write_extent(struct inode *inode)
{
	int res;

	mutex_lock(&HFSPLUS_I(inode)->extents_lock);
	res = hfsplus_ext_write_extent_locked(inode);
	mutex_unlock(&HFSPLUS_I(inode)->extents_lock);

	return res;
}

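/*
 * Find the extent record for (cnid, type) that covers @block and copy it
 * into @extent.  Returns -ENOENT if no such record exists, -EIO if the
 * record found has an unexpected size.
 */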
static inline int __hfsplus_ext_read_extent(struct hfs_find_data *fd,
					    struct hfsplus_extent *extent,
					    u32 cnid, u32 block, u8 type)
{
	int res;

	hfsplus_ext_build_key(fd->search_key, cnid, block, type);
	fd->key->ext.cnid = 0;
	res = hfs_brec_find(fd, hfs_find_rec_by_key);
	if (res && res != -ENOENT)
		return res;
	if (fd->key->ext.cnid != fd->search_key->ext.cnid ||
	    fd->key->ext.fork_type != fd->search_key->ext.fork_type)
		return -ENOENT;
	if (fd->entrylength != sizeof(hfsplus_extent_rec))
		return -EIO;
	hfs_bnode_read(fd->bnode, extent, fd->entryoffset,
		sizeof(hfsplus_extent_rec));
	return 0;
}

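/*
 * Make the inode's cached extent record cover @block: flush a dirty cache
 * first, then read the matching record and update cached_start and
 * cached_blocks.  Called with extents_lock held.
 */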
static inline int __hfsplus_ext_cache_extent(struct hfs_find_data *fd,
		struct inode *inode, u32 block)
{
	struct hfsplus_inode_info *hip = HFSPLUS_I(inode);
	int res;

	WARN_ON(!mutex_is_locked(&hip->extents_lock));

	if (hip->extent_state & HFSPLUS_EXT_DIRTY) {
		res = __hfsplus_ext_write_extent(inode, fd);
		if (res)
			return res;
	}

	res = __hfsplus_ext_read_extent(fd, hip->cached_extents, inode->i_ino,
					block, HFSPLUS_IS_RSRC(inode) ?
						HFSPLUS_TYPE_RSRC :
						HFSPLUS_TYPE_DATA);
	if (!res) {
		hip->cached_start = be32_to_cpu(fd->key->ext.start_block);
		hip->cached_blocks =
			hfsplus_ext_block_count(hip->cached_extents);
	} else {
		hip->cached_start = hip->cached_blocks = 0;
		hip->extent_state &= ~(HFSPLUS_EXT_DIRTY | HFSPLUS_EXT_NEW);
	}
	return res;
}

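/*
 * Ensure the extent record covering @block is cached in the inode; a hit
 * on the current cache returns immediately.
 */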
static int hfsplus_ext_read_extent(struct inode *inode, u32 block)
{
	struct hfsplus_inode_info *hip = HFSPLUS_I(inode);
	struct hfs_find_data fd;
	int res;

	if (block >= hip->cached_start &&
	    block < hip->cached_start + hip->cached_blocks)
		return 0;

	res = hfs_find_init(HFSPLUS_SB(inode->i_sb)->ext_tree, &fd);
	if (!res) {
		res = __hfsplus_ext_cache_extent(&fd, inode, block);
		hfs_find_exit(&fd);
	}
	return res;
}

/* Get a block at iblock for the inode, allocating it if create is set */
int hfsplus_get_block(struct inode *inode, sector_t iblock,
		      struct buffer_head *bh_result, int create)
{
	struct super_block *sb = inode->i_sb;
	struct hfsplus_sb_info *sbi = HFSPLUS_SB(sb);
	struct hfsplus_inode_info *hip = HFSPLUS_I(inode);
	int res = -EIO;
	u32 ablock, dblock, mask;
	sector_t sector;
	int was_dirty = 0;

	/* Convert inode block to disk allocation block */
	ablock = iblock >> sbi->fs_shift;

	if (iblock >= hip->fs_blocks) {
		if (!create)
			return 0;
		if (iblock > hip->fs_blocks)
			return -EIO;
		if (ablock >= hip->alloc_blocks) {
			res = hfsplus_file_extend(inode, false);
			if (res)
				return res;
		}
	} else
		create = 0;

	if (ablock < hip->first_blocks) {
		dblock = hfsplus_ext_find_block(hip->first_extents, ablock);
		goto done;
	}

	if (inode->i_ino == HFSPLUS_EXT_CNID)
		return -EIO;

	mutex_lock(&hip->extents_lock);

	/*
	 * hfsplus_ext_read_extent will write out a cached extent into
	 * the extents btree.  In that case we may have to mark the inode
	 * dirty even for a pure read of an extent here.
	 */
	was_dirty = (hip->extent_state & HFSPLUS_EXT_DIRTY);
	res = hfsplus_ext_read_extent(inode, ablock);
	if (res) {
		mutex_unlock(&hip->extents_lock);
		return -EIO;
	}
	dblock = hfsplus_ext_find_block(hip->cached_extents,
					ablock - hip->cached_start);
	mutex_unlock(&hip->extents_lock);

done:
	hfs_dbg(EXTENT, "get_block(%lu): %llu - %u\n",
		inode->i_ino, (long long)iblock, dblock);

	mask = (1 << sbi->fs_shift) - 1;
	sector = ((sector_t)dblock << sbi->fs_shift) +
		  sbi->blockoffset + (iblock & mask);
	map_bh(bh_result, sb, sector);

	if (create) {
		set_buffer_new(bh_result);
		hip->phys_size += sb->s_blocksize;
		hip->fs_blocks++;
		inode_add_bytes(inode, sb->s_blocksize);
	}
	if (create || was_dirty)
		mark_inode_dirty(inode);
	return 0;
}

static void hfsplus_dump_extent(struct hfsplus_extent *extent)
{
	int i;

	hfs_dbg(EXTENT, "   ");
	for (i = 0; i < 8; i++)
		hfs_dbg_cont(EXTENT, " %u:%u",
			     be32_to_cpu(extent[i].start_block),
			     be32_to_cpu(extent[i].block_count));
	hfs_dbg_cont(EXTENT, "\n");
}

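/*
 * Record @block_count newly allocated blocks at record-relative @offset:
 * merge them into the descriptor they extend contiguously, or start the
 * next free descriptor.  Returns -ENOSPC when all eight descriptors are
 * used, -EIO when @offset does not land on a descriptor boundary.
 */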
static int hfsplus_add_extent(struct hfsplus_extent *extent, u32 offset,
			      u32 alloc_block, u32 block_count)
{
	u32 count, start;
	int i;

	hfsplus_dump_extent(extent);
	for (i = 0; i < 8; extent++, i++) {
		count = be32_to_cpu(extent->block_count);
		if (offset == count) {
			start = be32_to_cpu(extent->start_block);
			if (alloc_block != start + count) {
				if (++i >= 8)
					return -ENOSPC;
				extent++;
				extent->start_block = cpu_to_be32(alloc_block);
			} else
				block_count += count;
			extent->block_count = cpu_to_be32(block_count);
			return 0;
		} else if (offset < count)
			break;
		offset -= count;
	}
	/* panic? */
	return -EIO;
}

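/*
 * Free @block_nr allocation blocks from the tail of an extent record,
 * where @offset is the record-relative block at which that tail ends.
 * Descriptors are cleared or shortened while walking backwards; on
 * bitmap errors the remaining extents are still freed and the last
 * error is returned.
 */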
static int hfsplus_free_extents(struct super_block *sb,
				struct hfsplus_extent *extent,
				u32 offset, u32 block_nr)
{
	u32 count, start;
	int i;
	int err = 0;

	/* Mapping the allocation file may lock the extent tree */
	WARN_ON(mutex_is_locked(&HFSPLUS_SB(sb)->ext_tree->tree_lock));

	hfsplus_dump_extent(extent);
	for (i = 0; i < 8; extent++, i++) {
		count = be32_to_cpu(extent->block_count);
		if (offset == count)
			goto found;
		else if (offset < count)
			break;
		offset -= count;
	}
	/* panic? */
	return -EIO;
found:
	for (;;) {
		start = be32_to_cpu(extent->start_block);
		if (count <= block_nr) {
			err = hfsplus_block_free(sb, start, count);
			if (err) {
				pr_err("can't free extent\n");
				hfs_dbg(EXTENT, " start: %u count: %u\n",
					start, count);
			}
			extent->block_count = 0;
			extent->start_block = 0;
			block_nr -= count;
		} else {
			count -= block_nr;
			err = hfsplus_block_free(sb, start + count, block_nr);
			if (err) {
				pr_err("can't free extent\n");
				hfs_dbg(EXTENT, " start: %u count: %u\n",
					start, count);
			}
			extent->block_count = cpu_to_be32(count);
			block_nr = 0;
		}
		if (!block_nr || !i) {
			/*
			 * Try to free all extents and
			 * return only the last error
			 */
			return err;
		}
		i--;
		extent--;
		count = be32_to_cpu(extent->block_count);
	}
}

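/*
 * Release all allocation blocks owned by a fork: the extents stored in
 * the catalog fork data first, then any overflow records in the extents
 * tree, which are removed as their blocks are freed.
 */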
int hfsplus_free_fork(struct super_block *sb, u32 cnid,
		struct hfsplus_fork_raw *fork, int type)
{
	struct hfs_find_data fd;
	hfsplus_extent_rec ext_entry;
	u32 total_blocks, blocks, start;
	int res, i;

	total_blocks = be32_to_cpu(fork->total_blocks);
	if (!total_blocks)
		return 0;

	blocks = 0;
	for (i = 0; i < 8; i++)
		blocks += be32_to_cpu(fork->extents[i].block_count);

	res = hfsplus_free_extents(sb, fork->extents, blocks, blocks);
	if (res)
		return res;
	if (total_blocks == blocks)
		return 0;

	res = hfs_find_init(HFSPLUS_SB(sb)->ext_tree, &fd);
	if (res)
		return res;
	do {
		res = __hfsplus_ext_read_extent(&fd, ext_entry, cnid,
						total_blocks, type);
		if (res)
			break;
		start = be32_to_cpu(fd.key->ext.start_block);
		hfs_brec_remove(&fd);

		mutex_unlock(&fd.tree->tree_lock);
		hfsplus_free_extents(sb, ext_entry, total_blocks - start,
				     total_blocks);
		total_blocks = start;
		mutex_lock(&fd.tree->tree_lock);
	} while (total_blocks > blocks);
	hfs_find_exit(&fd);

	return res;
}

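/*
 * Allocate up to one clump of additional blocks for the file, preferring
 * blocks contiguous with the current last extent.  The new range is merged
 * into the in-inode or cached extent record; if that record is full, the
 * cache is flushed and a new overflow record is started.
 */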
int hfsplus_file_extend(struct inode *inode, bool zeroout)
{
	struct super_block *sb = inode->i_sb;
	struct hfsplus_sb_info *sbi = HFSPLUS_SB(sb);
	struct hfsplus_inode_info *hip = HFSPLUS_I(inode);
	u32 start, len, goal;
	int res;

	if (sbi->alloc_file->i_size * 8 <
	    sbi->total_blocks - sbi->free_blocks + 8) {
		/* extend alloc file */
		pr_err("extend alloc file! (%llu,%u,%u)\n",
		       sbi->alloc_file->i_size * 8,
		       sbi->total_blocks, sbi->free_blocks);
		return -ENOSPC;
	}

	mutex_lock(&hip->extents_lock);
	if (hip->alloc_blocks == hip->first_blocks)
		goal = hfsplus_ext_lastblock(hip->first_extents);
	else {
		res = hfsplus_ext_read_extent(inode, hip->alloc_blocks);
		if (res)
			goto out;
		goal = hfsplus_ext_lastblock(hip->cached_extents);
	}

	len = hip->clump_blocks;
	start = hfsplus_block_allocate(sb, sbi->total_blocks, goal, &len);
	if (start >= sbi->total_blocks) {
		start = hfsplus_block_allocate(sb, goal, 0, &len);
		if (start >= goal) {
			res = -ENOSPC;
			goto out;
		}
	}

	if (zeroout) {
		res = sb_issue_zeroout(sb, start, len, GFP_NOFS);
		if (res)
			goto out;
	}

	hfs_dbg(EXTENT, "extend %lu: %u,%u\n", inode->i_ino, start, len);

	if (hip->alloc_blocks <= hip->first_blocks) {
		if (!hip->first_blocks) {
			hfs_dbg(EXTENT, "first extents\n");
			/* no extents yet */
			hip->first_extents[0].start_block = cpu_to_be32(start);
			hip->first_extents[0].block_count = cpu_to_be32(len);
			res = 0;
		} else {
			/* try to append to extents in inode */
			res = hfsplus_add_extent(hip->first_extents,
						 hip->alloc_blocks,
						 start, len);
			if (res == -ENOSPC)
				goto insert_extent;
		}
		if (!res) {
			hfsplus_dump_extent(hip->first_extents);
			hip->first_blocks += len;
		}
	} else {
		res = hfsplus_add_extent(hip->cached_extents,
					 hip->alloc_blocks - hip->cached_start,
					 start, len);
		if (!res) {
			hfsplus_dump_extent(hip->cached_extents);
			hip->extent_state |= HFSPLUS_EXT_DIRTY;
			hip->cached_blocks += len;
		} else if (res == -ENOSPC)
			goto insert_extent;
	}
out:
	if (!res) {
		hip->alloc_blocks += len;
		mutex_unlock(&hip->extents_lock);
		hfsplus_mark_inode_dirty(inode, HFSPLUS_I_ALLOC_DIRTY);
		return 0;
	}
	mutex_unlock(&hip->extents_lock);
	return res;

insert_extent:
	hfs_dbg(EXTENT, "insert new extent\n");
	res = hfsplus_ext_write_extent_locked(inode);
	if (res)
		goto out;

	memset(hip->cached_extents, 0, sizeof(hfsplus_extent_rec));
	hip->cached_extents[0].start_block = cpu_to_be32(start);
	hip->cached_extents[0].block_count = cpu_to_be32(len);
	hfsplus_dump_extent(hip->cached_extents);
	hip->extent_state |= HFSPLUS_EXT_DIRTY | HFSPLUS_EXT_NEW;
	hip->cached_start = hip->alloc_blocks;
	hip->cached_blocks = len;

	res = 0;
	goto out;
}

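/*
 * Bring the fork's allocation in line with i_size.  Growing files are
 * extended with a zero-length write through the page cache; shrinking
 * files have trailing extents freed and emptied overflow records removed
 * from the extents tree.
 */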
void hfsplus_file_truncate(struct inode *inode)
{
	struct super_block *sb = inode->i_sb;
	struct hfsplus_inode_info *hip = HFSPLUS_I(inode);
	struct hfs_find_data fd;
	u32 alloc_cnt, blk_cnt, start;
	int res;

	hfs_dbg(INODE, "truncate: %lu, %llu -> %llu\n",
		inode->i_ino, (long long)hip->phys_size, inode->i_size);

	if (inode->i_size > hip->phys_size) {
		struct address_space *mapping = inode->i_mapping;
		struct page *page;
		void *fsdata;
		loff_t size = inode->i_size;

		res = pagecache_write_begin(NULL, mapping, size, 0, 0,
					    &page, &fsdata);
		if (res)
			return;
		res = pagecache_write_end(NULL, mapping, size,
			0, 0, page, fsdata);
		if (res < 0)
			return;
		mark_inode_dirty(inode);
		return;
	} else if (inode->i_size == hip->phys_size)
		return;

	blk_cnt = (inode->i_size + HFSPLUS_SB(sb)->alloc_blksz - 1) >>
			HFSPLUS_SB(sb)->alloc_blksz_shift;

	mutex_lock(&hip->extents_lock);

	alloc_cnt = hip->alloc_blocks;
	if (blk_cnt == alloc_cnt)
		goto out_unlock;

	res = hfs_find_init(HFSPLUS_SB(sb)->ext_tree, &fd);
	if (res) {
		mutex_unlock(&hip->extents_lock);
		/* XXX: We lack error handling of hfsplus_file_truncate() */
		return;
	}
	while (1) {
		if (alloc_cnt == hip->first_blocks) {
			mutex_unlock(&fd.tree->tree_lock);
			hfsplus_free_extents(sb, hip->first_extents,
					     alloc_cnt, alloc_cnt - blk_cnt);
			hfsplus_dump_extent(hip->first_extents);
			hip->first_blocks = blk_cnt;
			mutex_lock(&fd.tree->tree_lock);
			break;
		}
		res = __hfsplus_ext_cache_extent(&fd, inode, alloc_cnt);
		if (res)
			break;

		start = hip->cached_start;
		if (blk_cnt <= start)
			hfs_brec_remove(&fd);
		mutex_unlock(&fd.tree->tree_lock);
		hfsplus_free_extents(sb, hip->cached_extents,
				     alloc_cnt - start, alloc_cnt - blk_cnt);
		hfsplus_dump_extent(hip->cached_extents);
		mutex_lock(&fd.tree->tree_lock);
		if (blk_cnt > start) {
			hip->extent_state |= HFSPLUS_EXT_DIRTY;
			break;
		}
		alloc_cnt = start;
		hip->cached_start = hip->cached_blocks = 0;
		hip->extent_state &= ~(HFSPLUS_EXT_DIRTY | HFSPLUS_EXT_NEW);
	}
	hfs_find_exit(&fd);

	hip->alloc_blocks = blk_cnt;
out_unlock:
	mutex_unlock(&hip->extents_lock);
	hip->phys_size = inode->i_size;
	hip->fs_blocks = (inode->i_size + sb->s_blocksize - 1) >>
		sb->s_blocksize_bits;
	inode_set_bytes(inode, hip->fs_blocks << sb->s_blocksize_bits);
	hfsplus_mark_inode_dirty(inode, HFSPLUS_I_ALLOC_DIRTY);
}