^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1) // SPDX-License-Identifier: GPL-2.0-only
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2) /* -*- mode: c; c-basic-offset: 8; -*-
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3) * vim: noexpandtab sw=8 ts=8 sts=0:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5) * extent_map.c
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7) * Block/Cluster mapping functions
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9) * Copyright (C) 2004 Oracle. All rights reserved.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12) #include <linux/fs.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13) #include <linux/init.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14) #include <linux/slab.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15) #include <linux/types.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 16) #include <linux/fiemap.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 17)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 18) #include <cluster/masklog.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 19)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 20) #include "ocfs2.h"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 21)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 22) #include "alloc.h"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 23) #include "dlmglue.h"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 24) #include "extent_map.h"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 25) #include "inode.h"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 26) #include "super.h"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 27) #include "symlink.h"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 28) #include "aops.h"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 29) #include "ocfs2_trace.h"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 30)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 31) #include "buffer_head_io.h"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 32)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 33) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 34) * The extent caching implementation is intentionally trivial.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 35) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 36) * We only cache a small number of extents stored directly on the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 37) * inode, so linear order operations are acceptable. If we ever want
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 38) * to increase the size of the extent map, then these algorithms must
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 39) * get smarter.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 40) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 41)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 42) void ocfs2_extent_map_init(struct inode *inode)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 43) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 44) struct ocfs2_inode_info *oi = OCFS2_I(inode);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 45)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 46) oi->ip_extent_map.em_num_items = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 47) INIT_LIST_HEAD(&oi->ip_extent_map.em_list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 48) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 49)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 50) static void __ocfs2_extent_map_lookup(struct ocfs2_extent_map *em,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 51) unsigned int cpos,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 52) struct ocfs2_extent_map_item **ret_emi)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 53) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 54) unsigned int range;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 55) struct ocfs2_extent_map_item *emi;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 56)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 57) *ret_emi = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 58)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 59) list_for_each_entry(emi, &em->em_list, ei_list) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 60) range = emi->ei_cpos + emi->ei_clusters;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 61)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 62) if (cpos >= emi->ei_cpos && cpos < range) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 63) list_move(&emi->ei_list, &em->em_list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 64)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 65) *ret_emi = emi;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 66) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 67) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 68) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 69) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 70)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 71) static int ocfs2_extent_map_lookup(struct inode *inode, unsigned int cpos,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 72) unsigned int *phys, unsigned int *len,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 73) unsigned int *flags)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 74) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 75) unsigned int coff;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 76) struct ocfs2_inode_info *oi = OCFS2_I(inode);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 77) struct ocfs2_extent_map_item *emi;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 78)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 79) spin_lock(&oi->ip_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 80)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 81) __ocfs2_extent_map_lookup(&oi->ip_extent_map, cpos, &emi);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 82) if (emi) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 83) coff = cpos - emi->ei_cpos;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 84) *phys = emi->ei_phys + coff;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 85) if (len)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 86) *len = emi->ei_clusters - coff;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 87) if (flags)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 88) *flags = emi->ei_flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 89) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 90)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 91) spin_unlock(&oi->ip_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 92)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 93) if (emi == NULL)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 94) return -ENOENT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 95)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 96) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 97) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 98)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 99) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 100) * Forget about all clusters equal to or greater than cpos.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 101) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 102) void ocfs2_extent_map_trunc(struct inode *inode, unsigned int cpos)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 103) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 104) struct ocfs2_extent_map_item *emi, *n;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 105) struct ocfs2_inode_info *oi = OCFS2_I(inode);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 106) struct ocfs2_extent_map *em = &oi->ip_extent_map;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 107) LIST_HEAD(tmp_list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 108) unsigned int range;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 109)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 110) spin_lock(&oi->ip_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 111) list_for_each_entry_safe(emi, n, &em->em_list, ei_list) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 112) if (emi->ei_cpos >= cpos) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 113) /* Full truncate of this record. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 114) list_move(&emi->ei_list, &tmp_list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 115) BUG_ON(em->em_num_items == 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 116) em->em_num_items--;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 117) continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 118) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 119)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 120) range = emi->ei_cpos + emi->ei_clusters;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 121) if (range > cpos) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 122) /* Partial truncate */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 123) emi->ei_clusters = cpos - emi->ei_cpos;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 124) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 125) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 126) spin_unlock(&oi->ip_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 127)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 128) list_for_each_entry_safe(emi, n, &tmp_list, ei_list) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 129) list_del(&emi->ei_list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 130) kfree(emi);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 131) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 132) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 133)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 134) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 135) * Is any part of emi2 contained within emi1
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 136) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 137) static int ocfs2_ei_is_contained(struct ocfs2_extent_map_item *emi1,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 138) struct ocfs2_extent_map_item *emi2)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 139) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 140) unsigned int range1, range2;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 141)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 142) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 143) * Check if logical start of emi2 is inside emi1
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 144) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 145) range1 = emi1->ei_cpos + emi1->ei_clusters;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 146) if (emi2->ei_cpos >= emi1->ei_cpos && emi2->ei_cpos < range1)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 147) return 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 148)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 149) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 150) * Check if logical end of emi2 is inside emi1
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 151) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 152) range2 = emi2->ei_cpos + emi2->ei_clusters;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 153) if (range2 > emi1->ei_cpos && range2 <= range1)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 154) return 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 155)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 156) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 157) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 158)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 159) static void ocfs2_copy_emi_fields(struct ocfs2_extent_map_item *dest,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 160) struct ocfs2_extent_map_item *src)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 161) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 162) dest->ei_cpos = src->ei_cpos;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 163) dest->ei_phys = src->ei_phys;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 164) dest->ei_clusters = src->ei_clusters;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 165) dest->ei_flags = src->ei_flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 166) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 167)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 168) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 169) * Try to merge emi with ins. Returns 1 if merge succeeds, zero
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 170) * otherwise.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 171) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 172) static int ocfs2_try_to_merge_extent_map(struct ocfs2_extent_map_item *emi,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 173) struct ocfs2_extent_map_item *ins)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 174) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 175) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 176) * Handle contiguousness
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 177) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 178) if (ins->ei_phys == (emi->ei_phys + emi->ei_clusters) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 179) ins->ei_cpos == (emi->ei_cpos + emi->ei_clusters) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 180) ins->ei_flags == emi->ei_flags) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 181) emi->ei_clusters += ins->ei_clusters;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 182) return 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 183) } else if ((ins->ei_phys + ins->ei_clusters) == emi->ei_phys &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 184) (ins->ei_cpos + ins->ei_clusters) == emi->ei_cpos &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 185) ins->ei_flags == emi->ei_flags) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 186) emi->ei_phys = ins->ei_phys;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 187) emi->ei_cpos = ins->ei_cpos;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 188) emi->ei_clusters += ins->ei_clusters;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 189) return 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 190) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 191)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 192) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 193) * Overlapping extents - this shouldn't happen unless we've
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 194) * split an extent to change it's flags. That is exceedingly
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 195) * rare, so there's no sense in trying to optimize it yet.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 196) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 197) if (ocfs2_ei_is_contained(emi, ins) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 198) ocfs2_ei_is_contained(ins, emi)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 199) ocfs2_copy_emi_fields(emi, ins);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 200) return 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 201) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 202)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 203) /* No merge was possible. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 204) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 205) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 206)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 207) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 208) * In order to reduce complexity on the caller, this insert function
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 209) * is intentionally liberal in what it will accept.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 210) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 211) * The only rule is that the truncate call *must* be used whenever
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 212) * records have been deleted. This avoids inserting overlapping
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 213) * records with different physical mappings.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 214) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 215) void ocfs2_extent_map_insert_rec(struct inode *inode,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 216) struct ocfs2_extent_rec *rec)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 217) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 218) struct ocfs2_inode_info *oi = OCFS2_I(inode);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 219) struct ocfs2_extent_map *em = &oi->ip_extent_map;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 220) struct ocfs2_extent_map_item *emi, *new_emi = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 221) struct ocfs2_extent_map_item ins;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 222)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 223) ins.ei_cpos = le32_to_cpu(rec->e_cpos);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 224) ins.ei_phys = ocfs2_blocks_to_clusters(inode->i_sb,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 225) le64_to_cpu(rec->e_blkno));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 226) ins.ei_clusters = le16_to_cpu(rec->e_leaf_clusters);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 227) ins.ei_flags = rec->e_flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 228)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 229) search:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 230) spin_lock(&oi->ip_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 231)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 232) list_for_each_entry(emi, &em->em_list, ei_list) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 233) if (ocfs2_try_to_merge_extent_map(emi, &ins)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 234) list_move(&emi->ei_list, &em->em_list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 235) spin_unlock(&oi->ip_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 236) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 237) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 238) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 239)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 240) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 241) * No item could be merged.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 242) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 243) * Either allocate and add a new item, or overwrite the last recently
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 244) * inserted.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 245) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 246)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 247) if (em->em_num_items < OCFS2_MAX_EXTENT_MAP_ITEMS) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 248) if (new_emi == NULL) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 249) spin_unlock(&oi->ip_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 250)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 251) new_emi = kmalloc(sizeof(*new_emi), GFP_NOFS);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 252) if (new_emi == NULL)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 253) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 254)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 255) goto search;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 256) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 257)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 258) ocfs2_copy_emi_fields(new_emi, &ins);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 259) list_add(&new_emi->ei_list, &em->em_list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 260) em->em_num_items++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 261) new_emi = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 262) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 263) BUG_ON(list_empty(&em->em_list) || em->em_num_items == 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 264) emi = list_entry(em->em_list.prev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 265) struct ocfs2_extent_map_item, ei_list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 266) list_move(&emi->ei_list, &em->em_list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 267) ocfs2_copy_emi_fields(emi, &ins);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 268) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 269)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 270) spin_unlock(&oi->ip_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 271)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 272) out:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 273) kfree(new_emi);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 274) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 275)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 276) static int ocfs2_last_eb_is_empty(struct inode *inode,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 277) struct ocfs2_dinode *di)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 278) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 279) int ret, next_free;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 280) u64 last_eb_blk = le64_to_cpu(di->i_last_eb_blk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 281) struct buffer_head *eb_bh = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 282) struct ocfs2_extent_block *eb;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 283) struct ocfs2_extent_list *el;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 284)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 285) ret = ocfs2_read_extent_block(INODE_CACHE(inode), last_eb_blk, &eb_bh);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 286) if (ret) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 287) mlog_errno(ret);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 288) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 289) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 290)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 291) eb = (struct ocfs2_extent_block *) eb_bh->b_data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 292) el = &eb->h_list;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 293)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 294) if (el->l_tree_depth) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 295) ocfs2_error(inode->i_sb,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 296) "Inode %lu has non zero tree depth in leaf block %llu\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 297) inode->i_ino,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 298) (unsigned long long)eb_bh->b_blocknr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 299) ret = -EROFS;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 300) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 301) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 302)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 303) next_free = le16_to_cpu(el->l_next_free_rec);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 304)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 305) if (next_free == 0 ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 306) (next_free == 1 && ocfs2_is_empty_extent(&el->l_recs[0])))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 307) ret = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 308)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 309) out:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 310) brelse(eb_bh);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 311) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 312) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 313)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 314) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 315) * Return the 1st index within el which contains an extent start
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 316) * larger than v_cluster.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 317) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 318) static int ocfs2_search_for_hole_index(struct ocfs2_extent_list *el,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 319) u32 v_cluster)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 320) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 321) int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 322) struct ocfs2_extent_rec *rec;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 323)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 324) for(i = 0; i < le16_to_cpu(el->l_next_free_rec); i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 325) rec = &el->l_recs[i];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 326)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 327) if (v_cluster < le32_to_cpu(rec->e_cpos))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 328) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 329) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 330)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 331) return i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 332) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 333)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 334) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 335) * Figure out the size of a hole which starts at v_cluster within the given
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 336) * extent list.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 337) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 338) * If there is no more allocation past v_cluster, we return the maximum
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 339) * cluster size minus v_cluster.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 340) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 341) * If we have in-inode extents, then el points to the dinode list and
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 342) * eb_bh is NULL. Otherwise, eb_bh should point to the extent block
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 343) * containing el.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 344) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 345) int ocfs2_figure_hole_clusters(struct ocfs2_caching_info *ci,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 346) struct ocfs2_extent_list *el,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 347) struct buffer_head *eb_bh,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 348) u32 v_cluster,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 349) u32 *num_clusters)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 350) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 351) int ret, i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 352) struct buffer_head *next_eb_bh = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 353) struct ocfs2_extent_block *eb, *next_eb;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 354)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 355) i = ocfs2_search_for_hole_index(el, v_cluster);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 356)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 357) if (i == le16_to_cpu(el->l_next_free_rec) && eb_bh) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 358) eb = (struct ocfs2_extent_block *)eb_bh->b_data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 359)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 360) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 361) * Check the next leaf for any extents.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 362) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 363)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 364) if (le64_to_cpu(eb->h_next_leaf_blk) == 0ULL)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 365) goto no_more_extents;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 366)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 367) ret = ocfs2_read_extent_block(ci,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 368) le64_to_cpu(eb->h_next_leaf_blk),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 369) &next_eb_bh);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 370) if (ret) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 371) mlog_errno(ret);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 372) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 373) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 374)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 375) next_eb = (struct ocfs2_extent_block *)next_eb_bh->b_data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 376) el = &next_eb->h_list;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 377) i = ocfs2_search_for_hole_index(el, v_cluster);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 378) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 379)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 380) no_more_extents:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 381) if (i == le16_to_cpu(el->l_next_free_rec)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 382) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 383) * We're at the end of our existing allocation. Just
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 384) * return the maximum number of clusters we could
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 385) * possibly allocate.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 386) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 387) *num_clusters = UINT_MAX - v_cluster;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 388) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 389) *num_clusters = le32_to_cpu(el->l_recs[i].e_cpos) - v_cluster;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 390) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 391)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 392) ret = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 393) out:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 394) brelse(next_eb_bh);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 395) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 396) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 397)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 398) static int ocfs2_get_clusters_nocache(struct inode *inode,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 399) struct buffer_head *di_bh,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 400) u32 v_cluster, unsigned int *hole_len,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 401) struct ocfs2_extent_rec *ret_rec,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 402) unsigned int *is_last)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 403) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 404) int i, ret, tree_height, len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 405) struct ocfs2_dinode *di;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 406) struct ocfs2_extent_block *eb;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 407) struct ocfs2_extent_list *el;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 408) struct ocfs2_extent_rec *rec;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 409) struct buffer_head *eb_bh = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 410)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 411) memset(ret_rec, 0, sizeof(*ret_rec));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 412) if (is_last)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 413) *is_last = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 414)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 415) di = (struct ocfs2_dinode *) di_bh->b_data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 416) el = &di->id2.i_list;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 417) tree_height = le16_to_cpu(el->l_tree_depth);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 418)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 419) if (tree_height > 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 420) ret = ocfs2_find_leaf(INODE_CACHE(inode), el, v_cluster,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 421) &eb_bh);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 422) if (ret) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 423) mlog_errno(ret);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 424) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 425) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 426)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 427) eb = (struct ocfs2_extent_block *) eb_bh->b_data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 428) el = &eb->h_list;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 429)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 430) if (el->l_tree_depth) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 431) ocfs2_error(inode->i_sb,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 432) "Inode %lu has non zero tree depth in leaf block %llu\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 433) inode->i_ino,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 434) (unsigned long long)eb_bh->b_blocknr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 435) ret = -EROFS;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 436) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 437) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 438) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 439)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 440) i = ocfs2_search_extent_list(el, v_cluster);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 441) if (i == -1) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 442) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 443) * Holes can be larger than the maximum size of an
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 444) * extent, so we return their lengths in a separate
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 445) * field.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 446) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 447) if (hole_len) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 448) ret = ocfs2_figure_hole_clusters(INODE_CACHE(inode),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 449) el, eb_bh,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 450) v_cluster, &len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 451) if (ret) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 452) mlog_errno(ret);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 453) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 454) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 455)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 456) *hole_len = len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 457) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 458) goto out_hole;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 459) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 460)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 461) rec = &el->l_recs[i];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 462)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 463) BUG_ON(v_cluster < le32_to_cpu(rec->e_cpos));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 464)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 465) if (!rec->e_blkno) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 466) ocfs2_error(inode->i_sb,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 467) "Inode %lu has bad extent record (%u, %u, 0)\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 468) inode->i_ino,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 469) le32_to_cpu(rec->e_cpos),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 470) ocfs2_rec_clusters(el, rec));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 471) ret = -EROFS;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 472) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 473) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 474)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 475) *ret_rec = *rec;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 476)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 477) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 478) * Checking for last extent is potentially expensive - we
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 479) * might have to look at the next leaf over to see if it's
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 480) * empty.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 481) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 482) * The first two checks are to see whether the caller even
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 483) * cares for this information, and if the extent is at least
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 484) * the last in it's list.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 485) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 486) * If those hold true, then the extent is last if any of the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 487) * additional conditions hold true:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 488) * - Extent list is in-inode
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 489) * - Extent list is right-most
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 490) * - Extent list is 2nd to rightmost, with empty right-most
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 491) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 492) if (is_last) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 493) if (i == (le16_to_cpu(el->l_next_free_rec) - 1)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 494) if (tree_height == 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 495) *is_last = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 496) else if (eb->h_blkno == di->i_last_eb_blk)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 497) *is_last = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 498) else if (eb->h_next_leaf_blk == di->i_last_eb_blk) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 499) ret = ocfs2_last_eb_is_empty(inode, di);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 500) if (ret < 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 501) mlog_errno(ret);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 502) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 503) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 504) if (ret == 1)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 505) *is_last = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 506) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 507) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 508) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 509)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 510) out_hole:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 511) ret = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 512) out:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 513) brelse(eb_bh);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 514) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 515) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 516)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 517) static void ocfs2_relative_extent_offsets(struct super_block *sb,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 518) u32 v_cluster,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 519) struct ocfs2_extent_rec *rec,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 520) u32 *p_cluster, u32 *num_clusters)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 521)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 522) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 523) u32 coff = v_cluster - le32_to_cpu(rec->e_cpos);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 524)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 525) *p_cluster = ocfs2_blocks_to_clusters(sb, le64_to_cpu(rec->e_blkno));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 526) *p_cluster = *p_cluster + coff;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 527)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 528) if (num_clusters)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 529) *num_clusters = le16_to_cpu(rec->e_leaf_clusters) - coff;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 530) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 531)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 532) int ocfs2_xattr_get_clusters(struct inode *inode, u32 v_cluster,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 533) u32 *p_cluster, u32 *num_clusters,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 534) struct ocfs2_extent_list *el,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 535) unsigned int *extent_flags)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 536) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 537) int ret = 0, i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 538) struct buffer_head *eb_bh = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 539) struct ocfs2_extent_block *eb;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 540) struct ocfs2_extent_rec *rec;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 541) u32 coff;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 542)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 543) if (el->l_tree_depth) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 544) ret = ocfs2_find_leaf(INODE_CACHE(inode), el, v_cluster,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 545) &eb_bh);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 546) if (ret) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 547) mlog_errno(ret);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 548) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 549) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 550)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 551) eb = (struct ocfs2_extent_block *) eb_bh->b_data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 552) el = &eb->h_list;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 553)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 554) if (el->l_tree_depth) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 555) ocfs2_error(inode->i_sb,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 556) "Inode %lu has non zero tree depth in xattr leaf block %llu\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 557) inode->i_ino,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 558) (unsigned long long)eb_bh->b_blocknr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 559) ret = -EROFS;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 560) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 561) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 562) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 563)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 564) i = ocfs2_search_extent_list(el, v_cluster);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 565) if (i == -1) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 566) ret = -EROFS;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 567) mlog_errno(ret);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 568) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 569) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 570) rec = &el->l_recs[i];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 571) BUG_ON(v_cluster < le32_to_cpu(rec->e_cpos));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 572)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 573) if (!rec->e_blkno) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 574) ocfs2_error(inode->i_sb,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 575) "Inode %lu has bad extent record (%u, %u, 0) in xattr\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 576) inode->i_ino,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 577) le32_to_cpu(rec->e_cpos),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 578) ocfs2_rec_clusters(el, rec));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 579) ret = -EROFS;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 580) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 581) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 582) coff = v_cluster - le32_to_cpu(rec->e_cpos);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 583) *p_cluster = ocfs2_blocks_to_clusters(inode->i_sb,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 584) le64_to_cpu(rec->e_blkno));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 585) *p_cluster = *p_cluster + coff;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 586) if (num_clusters)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 587) *num_clusters = ocfs2_rec_clusters(el, rec) - coff;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 588)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 589) if (extent_flags)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 590) *extent_flags = rec->e_flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 591) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 592) out:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 593) brelse(eb_bh);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 594) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 595) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 596)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 597) int ocfs2_get_clusters(struct inode *inode, u32 v_cluster,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 598) u32 *p_cluster, u32 *num_clusters,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 599) unsigned int *extent_flags)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 600) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 601) int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 602) unsigned int hole_len, flags = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 603) struct buffer_head *di_bh = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 604) struct ocfs2_extent_rec rec;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 605)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 606) if (OCFS2_I(inode)->ip_dyn_features & OCFS2_INLINE_DATA_FL) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 607) ret = -ERANGE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 608) mlog_errno(ret);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 609) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 610) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 611)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 612) ret = ocfs2_extent_map_lookup(inode, v_cluster, p_cluster,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 613) num_clusters, extent_flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 614) if (ret == 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 615) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 616)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 617) ret = ocfs2_read_inode_block(inode, &di_bh);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 618) if (ret) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 619) mlog_errno(ret);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 620) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 621) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 622)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 623) ret = ocfs2_get_clusters_nocache(inode, di_bh, v_cluster, &hole_len,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 624) &rec, NULL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 625) if (ret) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 626) mlog_errno(ret);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 627) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 628) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 629)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 630) if (rec.e_blkno == 0ULL) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 631) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 632) * A hole was found. Return some canned values that
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 633) * callers can key on. If asked for, num_clusters will
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 634) * be populated with the size of the hole.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 635) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 636) *p_cluster = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 637) if (num_clusters) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 638) *num_clusters = hole_len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 639) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 640) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 641) ocfs2_relative_extent_offsets(inode->i_sb, v_cluster, &rec,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 642) p_cluster, num_clusters);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 643) flags = rec.e_flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 644)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 645) ocfs2_extent_map_insert_rec(inode, &rec);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 646) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 647)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 648) if (extent_flags)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 649) *extent_flags = flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 650)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 651) out:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 652) brelse(di_bh);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 653) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 654) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 655)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 656) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 657) * This expects alloc_sem to be held. The allocation cannot change at
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 658) * all while the map is in the process of being updated.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 659) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 660) int ocfs2_extent_map_get_blocks(struct inode *inode, u64 v_blkno, u64 *p_blkno,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 661) u64 *ret_count, unsigned int *extent_flags)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 662) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 663) int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 664) int bpc = ocfs2_clusters_to_blocks(inode->i_sb, 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 665) u32 cpos, num_clusters, p_cluster;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 666) u64 boff = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 667)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 668) cpos = ocfs2_blocks_to_clusters(inode->i_sb, v_blkno);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 669)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 670) ret = ocfs2_get_clusters(inode, cpos, &p_cluster, &num_clusters,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 671) extent_flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 672) if (ret) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 673) mlog_errno(ret);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 674) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 675) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 676)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 677) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 678) * p_cluster == 0 indicates a hole.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 679) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 680) if (p_cluster) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 681) boff = ocfs2_clusters_to_blocks(inode->i_sb, p_cluster);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 682) boff += (v_blkno & (u64)(bpc - 1));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 683) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 684)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 685) *p_blkno = boff;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 686)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 687) if (ret_count) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 688) *ret_count = ocfs2_clusters_to_blocks(inode->i_sb, num_clusters);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 689) *ret_count -= v_blkno & (u64)(bpc - 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 690) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 691)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 692) out:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 693) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 694) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 695)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 696) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 697) * The ocfs2_fiemap_inline() may be a little bit misleading, since
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 698) * it not only handles the fiemap for inlined files, but also deals
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 699) * with the fast symlink, cause they have no difference for extent
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 700) * mapping per se.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 701) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 702) static int ocfs2_fiemap_inline(struct inode *inode, struct buffer_head *di_bh,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 703) struct fiemap_extent_info *fieinfo,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 704) u64 map_start)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 705) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 706) int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 707) unsigned int id_count;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 708) struct ocfs2_dinode *di;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 709) u64 phys;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 710) u32 flags = FIEMAP_EXTENT_DATA_INLINE|FIEMAP_EXTENT_LAST;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 711) struct ocfs2_inode_info *oi = OCFS2_I(inode);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 712)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 713) di = (struct ocfs2_dinode *)di_bh->b_data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 714) if (ocfs2_inode_is_fast_symlink(inode))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 715) id_count = ocfs2_fast_symlink_chars(inode->i_sb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 716) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 717) id_count = le16_to_cpu(di->id2.i_data.id_count);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 718)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 719) if (map_start < id_count) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 720) phys = oi->ip_blkno << inode->i_sb->s_blocksize_bits;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 721) if (ocfs2_inode_is_fast_symlink(inode))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 722) phys += offsetof(struct ocfs2_dinode, id2.i_symlink);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 723) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 724) phys += offsetof(struct ocfs2_dinode,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 725) id2.i_data.id_data);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 726)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 727) ret = fiemap_fill_next_extent(fieinfo, 0, phys, id_count,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 728) flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 729) if (ret < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 730) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 731) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 732)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 733) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 734) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 735)
/*
 * Report the extents backing [map_start, map_start + map_len) through
 * @fieinfo.  Takes the cluster lock and ip_alloc_sem for reading;
 * inline-data inodes and fast symlinks are handed off to
 * ocfs2_fiemap_inline().  Holes are skipped, not reported.
 */
int ocfs2_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo,
		 u64 map_start, u64 map_len)
{
	int ret, is_last;
	u32 mapping_end, cpos;
	unsigned int hole_size;
	struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
	u64 len_bytes, phys_bytes, virt_bytes;
	struct buffer_head *di_bh = NULL;
	struct ocfs2_extent_rec rec;

	/* Validate the request and clamp map_len against i_size. */
	ret = fiemap_prep(inode, fieinfo, map_start, &map_len, 0);
	if (ret)
		return ret;

	ret = ocfs2_inode_lock(inode, &di_bh, 0);
	if (ret) {
		mlog_errno(ret);
		goto out;
	}

	/* Freeze the allocation while we walk it (taken after the
	 * cluster lock; released in the reverse order below). */
	down_read(&OCFS2_I(inode)->ip_alloc_sem);

	/*
	 * Handle inline-data and fast symlink separately.
	 */
	if ((OCFS2_I(inode)->ip_dyn_features & OCFS2_INLINE_DATA_FL) ||
	    ocfs2_inode_is_fast_symlink(inode)) {
		ret = ocfs2_fiemap_inline(inode, di_bh, fieinfo, map_start);
		goto out_unlock;
	}

	cpos = map_start >> osb->s_clustersize_bits;
	mapping_end = ocfs2_clusters_for_bytes(inode->i_sb,
					       map_start + map_len);
	is_last = 0;
	/* Walk extent records until the range or the tree is exhausted. */
	while (cpos < mapping_end && !is_last) {
		u32 fe_flags;

		ret = ocfs2_get_clusters_nocache(inode, di_bh, cpos,
						 &hole_size, &rec, &is_last);
		if (ret) {
			mlog_errno(ret);
			goto out_unlock;
		}

		/* Zero e_blkno marks a hole; advance past it silently. */
		if (rec.e_blkno == 0ULL) {
			cpos += hole_size;
			continue;
		}

		fe_flags = 0;
		if (rec.e_flags & OCFS2_EXT_UNWRITTEN)
			fe_flags |= FIEMAP_EXTENT_UNWRITTEN;
		if (rec.e_flags & OCFS2_EXT_REFCOUNTED)
			fe_flags |= FIEMAP_EXTENT_SHARED;
		if (is_last)
			fe_flags |= FIEMAP_EXTENT_LAST;
		/* Lengths/offsets are cluster-sized; e_blkno is in blocks. */
		len_bytes = (u64)le16_to_cpu(rec.e_leaf_clusters) << osb->s_clustersize_bits;
		phys_bytes = le64_to_cpu(rec.e_blkno) << osb->sb->s_blocksize_bits;
		virt_bytes = (u64)le32_to_cpu(rec.e_cpos) << osb->s_clustersize_bits;

		ret = fiemap_fill_next_extent(fieinfo, virt_bytes, phys_bytes,
					      len_bytes, fe_flags);
		/* > 0 means the user buffer is full; stop without error. */
		if (ret)
			break;

		cpos = le32_to_cpu(rec.e_cpos)+ le16_to_cpu(rec.e_leaf_clusters);
	}

	if (ret > 0)
		ret = 0;

out_unlock:
	brelse(di_bh);

	up_read(&OCFS2_I(inode)->ip_alloc_sem);

	ocfs2_inode_unlock(inode, 0);
out:

	return ret;
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 819)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 820) /* Is IO overwriting allocated blocks? */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 821) int ocfs2_overwrite_io(struct inode *inode, struct buffer_head *di_bh,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 822) u64 map_start, u64 map_len)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 823) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 824) int ret = 0, is_last;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 825) u32 mapping_end, cpos;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 826) struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 827) struct ocfs2_extent_rec rec;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 828)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 829) if (OCFS2_I(inode)->ip_dyn_features & OCFS2_INLINE_DATA_FL) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 830) if (ocfs2_size_fits_inline_data(di_bh, map_start + map_len))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 831) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 832) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 833) return -EAGAIN;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 834) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 835)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 836) cpos = map_start >> osb->s_clustersize_bits;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 837) mapping_end = ocfs2_clusters_for_bytes(inode->i_sb,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 838) map_start + map_len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 839) is_last = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 840) while (cpos < mapping_end && !is_last) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 841) ret = ocfs2_get_clusters_nocache(inode, di_bh, cpos,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 842) NULL, &rec, &is_last);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 843) if (ret) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 844) mlog_errno(ret);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 845) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 846) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 847)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 848) if (rec.e_blkno == 0ULL)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 849) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 850)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 851) if (rec.e_flags & OCFS2_EXT_REFCOUNTED)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 852) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 853)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 854) cpos = le32_to_cpu(rec.e_cpos) +
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 855) le16_to_cpu(rec.e_leaf_clusters);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 856) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 857)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 858) if (cpos < mapping_end)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 859) ret = -EAGAIN;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 860) out:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 861) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 862) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 863)
/*
 * Implement SEEK_DATA/SEEK_HOLE for *offset.  Walks extent records
 * from the cluster containing *offset toward i_size, classifying each
 * span as data or hole (unwritten extents count as holes), and stores
 * the first matching byte offset back into *offset.  Returns -ENXIO
 * when *offset is at/past i_size or when SEEK_DATA finds no more data.
 */
int ocfs2_seek_data_hole_offset(struct file *file, loff_t *offset, int whence)
{
	struct inode *inode = file->f_mapping->host;
	int ret;
	unsigned int is_last = 0, is_data = 0;
	u16 cs_bits = OCFS2_SB(inode->i_sb)->s_clustersize_bits;
	u32 cpos, cend, clen, hole_size;
	u64 extoff, extlen;
	struct buffer_head *di_bh = NULL;
	struct ocfs2_extent_rec rec;

	BUG_ON(whence != SEEK_DATA && whence != SEEK_HOLE);

	ret = ocfs2_inode_lock(inode, &di_bh, 0);
	if (ret) {
		mlog_errno(ret);
		goto out;
	}

	/* Taken after the cluster lock; released in reverse order below. */
	down_read(&OCFS2_I(inode)->ip_alloc_sem);

	if (*offset >= i_size_read(inode)) {
		ret = -ENXIO;
		goto out_unlock;
	}

	/* Inline data is all data: SEEK_DATA keeps *offset, SEEK_HOLE
	 * lands on i_size (the implicit hole at EOF). */
	if (OCFS2_I(inode)->ip_dyn_features & OCFS2_INLINE_DATA_FL) {
		if (whence == SEEK_HOLE)
			*offset = i_size_read(inode);
		goto out_unlock;
	}

	clen = 0;
	cpos = *offset >> cs_bits;
	cend = ocfs2_clusters_for_bytes(inode->i_sb, i_size_read(inode));

	while (cpos < cend && !is_last) {
		ret = ocfs2_get_clusters_nocache(inode, di_bh, cpos, &hole_size,
						 &rec, &is_last);
		if (ret) {
			mlog_errno(ret);
			goto out_unlock;
		}

		/* Byte offset of the span starting at cpos. */
		extoff = cpos;
		extoff <<= cs_bits;

		if (rec.e_blkno == 0ULL) {
			/* Hole: clen is its length in clusters. */
			clen = hole_size;
			is_data = 0;
		} else {
			/* Clusters left in this record from cpos onward;
			 * unwritten extents are treated as holes. */
			clen = le16_to_cpu(rec.e_leaf_clusters) -
				(cpos - le32_to_cpu(rec.e_cpos));
			is_data = (rec.e_flags & OCFS2_EXT_UNWRITTEN) ? 0 : 1;
		}

		if ((!is_data && whence == SEEK_HOLE) ||
		    (is_data && whence == SEEK_DATA)) {
			/* Don't move backwards within the matching span. */
			if (extoff > *offset)
				*offset = extoff;
			goto out_unlock;
		}

		if (!is_last)
			cpos += clen;
	}

	/* SEEK_HOLE always succeeds: the virtual hole at EOF qualifies.
	 * clen still holds the length of the last span examined. */
	if (whence == SEEK_HOLE) {
		extoff = cpos;
		extoff <<= cs_bits;
		extlen = clen;
		extlen <<= cs_bits;

		/* Clamp the final span to i_size before stepping past it. */
		if ((extoff + extlen) > i_size_read(inode))
			extlen = i_size_read(inode) - extoff;
		extoff += extlen;
		if (extoff > *offset)
			*offset = extoff;
		goto out_unlock;
	}

	/* SEEK_DATA with no data left. */
	ret = -ENXIO;

out_unlock:

	brelse(di_bh);

	up_read(&OCFS2_I(inode)->ip_alloc_sem);

	ocfs2_inode_unlock(inode, 0);
out:
	return ret;
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 957)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 958) int ocfs2_read_virt_blocks(struct inode *inode, u64 v_block, int nr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 959) struct buffer_head *bhs[], int flags,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 960) int (*validate)(struct super_block *sb,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 961) struct buffer_head *bh))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 962) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 963) int rc = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 964) u64 p_block, p_count;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 965) int i, count, done = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 966)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 967) trace_ocfs2_read_virt_blocks(
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 968) inode, (unsigned long long)v_block, nr, bhs, flags,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 969) validate);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 970)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 971) if (((v_block + nr - 1) << inode->i_sb->s_blocksize_bits) >=
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 972) i_size_read(inode)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 973) BUG_ON(!(flags & OCFS2_BH_READAHEAD));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 974) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 975) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 976)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 977) while (done < nr) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 978) down_read(&OCFS2_I(inode)->ip_alloc_sem);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 979) rc = ocfs2_extent_map_get_blocks(inode, v_block + done,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 980) &p_block, &p_count, NULL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 981) up_read(&OCFS2_I(inode)->ip_alloc_sem);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 982) if (rc) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 983) mlog_errno(rc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 984) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 985) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 986)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 987) if (!p_block) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 988) rc = -EIO;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 989) mlog(ML_ERROR,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 990) "Inode #%llu contains a hole at offset %llu\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 991) (unsigned long long)OCFS2_I(inode)->ip_blkno,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 992) (unsigned long long)(v_block + done) <<
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 993) inode->i_sb->s_blocksize_bits);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 994) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 995) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 996)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 997) count = nr - done;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 998) if (p_count < count)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 999) count = p_count;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1000)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1001) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1002) * If the caller passed us bhs, they should have come
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1003) * from a previous readahead call to this function. Thus,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1004) * they should have the right b_blocknr.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1005) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1006) for (i = 0; i < count; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1007) if (!bhs[done + i])
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1008) continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1009) BUG_ON(bhs[done + i]->b_blocknr != (p_block + i));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1010) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1011)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1012) rc = ocfs2_read_blocks(INODE_CACHE(inode), p_block, count,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1013) bhs + done, flags, validate);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1014) if (rc) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1015) mlog_errno(rc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1016) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1017) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1018) done += count;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1019) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1020)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1021) out:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1022) return rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1023) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1024)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1025)