// SPDX-License-Identifier: GPL-2.0
/*
 *  linux/fs/hfsplus/bitmap.c
 *
 * Copyright (C) 2001
 * Brad Boyer (flar@allandria.com)
 * (C) 2003 Ardis Technologies <roman@ardistech.com>
 *
 * Handling of allocation file
 */

#include <linux/pagemap.h>

#include "hfsplus_fs.h"
#include "hfsplus_raw.h"

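/*
 * Number of allocation-bitmap bits held in one page of the allocation file.
 * The bitmap is stored as big-endian 32-bit words; bit 31 (the MSB) of a
 * word refers to the lowest-numbered block in that word, and a set bit
 * means the block is allocated.
 */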
#define PAGE_CACHE_BITS	(PAGE_SIZE * 8)

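/*
 * hfsplus_block_allocate - find and mark a run of free allocation blocks
 *
 * Scan the allocation bitmap of @sb for a run of clear bits, starting the
 * search at block @offset and considering only the first @size blocks.
 * Up to *@max blocks are marked allocated; on success the number actually
 * obtained (always contiguous) is stored back in *@max and the first block
 * of the run is returned.  If the bitmap is full or a bitmap page cannot
 * be read, a value no smaller than @size is returned, which callers treat
 * as failure.
 */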
int hfsplus_block_allocate(struct super_block *sb, u32 size,
		u32 offset, u32 *max)
{
	struct hfsplus_sb_info *sbi = HFSPLUS_SB(sb);
	struct page *page;
	struct address_space *mapping;
	__be32 *pptr, *curr, *end;
	u32 mask, start, len, n;
	__be32 val;
	int i;

	len = *max;
	if (!len)
		return size;

	hfs_dbg(BITMAP, "block_allocate: %u,%u,%u\n", size, offset, len);
	mutex_lock(&sbi->alloc_mutex);
	mapping = sbi->alloc_file->i_mapping;
	page = read_mapping_page(mapping, offset / PAGE_CACHE_BITS, NULL);
	if (IS_ERR(page)) {
		start = size;
		goto out;
	}
	pptr = kmap(page);
	curr = pptr + (offset & (PAGE_CACHE_BITS - 1)) / 32;
	i = offset % 32;
	offset &= ~(PAGE_CACHE_BITS - 1);
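	/*
	 * If the end of the bitmap (@size) lies beyond this page, scan the
	 * whole page; otherwise stop at the word containing the last valid
	 * bit.
	 */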
	if ((size ^ offset) / PAGE_CACHE_BITS)
		end = pptr + PAGE_CACHE_BITS / 32;
	else
		end = pptr + ((size + 31) & (PAGE_CACHE_BITS - 1)) / 32;

	/* scan the first partial u32 for zero bits */
	val = *curr;
	if (~val) {
		n = be32_to_cpu(val);
		mask = (1U << 31) >> i;
		for (; i < 32; mask >>= 1, i++) {
			if (!(n & mask))
				goto found;
		}
	}
	curr++;

	/* scan complete u32s for the first zero bit */
	while (1) {
		while (curr < end) {
			val = *curr;
			if (~val) {
				n = be32_to_cpu(val);
				mask = 1U << 31;
				for (i = 0; i < 32; mask >>= 1, i++) {
					if (!(n & mask))
						goto found;
				}
			}
			curr++;
		}
		kunmap(page);
		offset += PAGE_CACHE_BITS;
		if (offset >= size)
			break;
		page = read_mapping_page(mapping, offset / PAGE_CACHE_BITS,
					 NULL);
		if (IS_ERR(page)) {
			start = size;
			goto out;
		}
		curr = pptr = kmap(page);
		if ((size ^ offset) / PAGE_CACHE_BITS)
			end = pptr + PAGE_CACHE_BITS / 32;
		else
			end = pptr + ((size + 31) & (PAGE_CACHE_BITS - 1)) / 32;
	}
	hfs_dbg(BITMAP, "bitmap full\n");
	start = size;
	goto out;

found:
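	/*
	 * curr and i now point at the first clear bit.  Mark blocks as
	 * allocated from here until up to *max bits have been set, an
	 * already-set bit is met, or the end of the bitmap is reached.
	 */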
	start = offset + (curr - pptr) * 32 + i;
	if (start >= size) {
		hfs_dbg(BITMAP, "bitmap full\n");
		kunmap(page);
		goto out;
	}
	/* do any partial u32 at the start */
	len = min(size - start, len);
	while (1) {
		n |= mask;
		if (++i >= 32)
			break;
		mask >>= 1;
		if (!--len || n & mask)
			goto done;
	}
	if (!--len)
		goto done;
	*curr++ = cpu_to_be32(n);
	/* do full u32s */
	while (1) {
		while (curr < end) {
			n = be32_to_cpu(*curr);
			if (len < 32)
				goto last;
			if (n) {
				len = 32;
				goto last;
			}
			*curr++ = cpu_to_be32(0xffffffff);
			len -= 32;
		}
		set_page_dirty(page);
		kunmap(page);
		offset += PAGE_CACHE_BITS;
		page = read_mapping_page(mapping, offset / PAGE_CACHE_BITS,
					 NULL);
		if (IS_ERR(page)) {
			start = size;
			goto out;
		}
		pptr = kmap(page);
		curr = pptr;
		end = pptr + PAGE_CACHE_BITS / 32;
	}
last:
	/* do any partial u32 at end */
	mask = 1U << 31;
	for (i = 0; i < len; i++) {
		if (n & mask)
			break;
		n |= mask;
		mask >>= 1;
	}
done:
	*curr = cpu_to_be32(n);
	set_page_dirty(page);
	kunmap(page);
	*max = offset + (curr - pptr) * 32 + i - start;
	sbi->free_blocks -= *max;
	hfsplus_mark_mdb_dirty(sb);
	hfs_dbg(BITMAP, "-> %u,%u\n", start, *max);
out:
	mutex_unlock(&sbi->alloc_mutex);
	return start;
}

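/*
 * hfsplus_block_free - clear a run of bits in the allocation bitmap
 *
 * Mark @count allocation blocks starting at @offset as free and add them
 * back to the superblock's free-block count.  Returns 0 on success,
 * -ENOENT if the range extends past the end of the volume, or -EIO if a
 * bitmap page cannot be read.
 */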
int hfsplus_block_free(struct super_block *sb, u32 offset, u32 count)
{
	struct hfsplus_sb_info *sbi = HFSPLUS_SB(sb);
	struct page *page;
	struct address_space *mapping;
	__be32 *pptr, *curr, *end;
	u32 mask, len, pnr;
	int i;

	/* is there any actual work to be done? */
	if (!count)
		return 0;

	hfs_dbg(BITMAP, "block_free: %u,%u\n", offset, count);
	/* are all of the bits in range? */
	if ((offset + count) > sbi->total_blocks)
		return -ENOENT;

	mutex_lock(&sbi->alloc_mutex);
	mapping = sbi->alloc_file->i_mapping;
	pnr = offset / PAGE_CACHE_BITS;
	page = read_mapping_page(mapping, pnr, NULL);
	if (IS_ERR(page))
		goto kaboom;
	pptr = kmap(page);
	curr = pptr + (offset & (PAGE_CACHE_BITS - 1)) / 32;
	end = pptr + PAGE_CACHE_BITS / 32;
	len = count;

	/* do any partial u32 at the start */
	i = offset % 32;
	if (i) {
		int j = 32 - i;
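		/*
		 * Build a mask of the bits to preserve: the i blocks before
		 * @offset in this word and, if the run ends inside the same
		 * word, the blocks after it.  ANDing clears only the freed
		 * range.
		 */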
		mask = 0xffffffffU << j;
		if (j > count) {
			mask |= 0xffffffffU >> (i + count);
			*curr++ &= cpu_to_be32(mask);
			goto out;
		}
		*curr++ &= cpu_to_be32(mask);
		count -= j;
	}

	/* do full u32s */
	while (1) {
		while (curr < end) {
			if (count < 32)
				goto done;
			*curr++ = 0;
			count -= 32;
		}
		if (!count)
			break;
		set_page_dirty(page);
		kunmap(page);
		page = read_mapping_page(mapping, ++pnr, NULL);
		if (IS_ERR(page))
			goto kaboom;
		pptr = kmap(page);
		curr = pptr;
		end = pptr + PAGE_CACHE_BITS / 32;
	}
done:
	/* do any partial u32 at end */
	if (count) {
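		/*
		 * Clear the first @count bits (MSB side, i.e. the lowest
		 * numbered blocks) of the final word and keep the rest.
		 */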
		mask = 0xffffffffU >> count;
		*curr &= cpu_to_be32(mask);
	}
out:
	set_page_dirty(page);
	kunmap(page);
	sbi->free_blocks += len;
	hfsplus_mark_mdb_dirty(sb);
	mutex_unlock(&sbi->alloc_mutex);

	return 0;

kaboom:
	pr_crit("unable to mark blocks free: error %ld\n", PTR_ERR(page));
	mutex_unlock(&sbi->alloc_mutex);

	return -EIO;
}