// SPDX-License-Identifier: GPL-2.0
/*
 * linux/fs/ext2/xattr.c
 *
 * Copyright (C) 2001-2003 Andreas Gruenbacher <agruen@suse.de>
 *
 * Fix by Harrison Xing <harrison@mountainviewdata.com>.
 * Extended attributes for symlinks and special files added per
 * suggestion of Luka Renko <luka.renko@hermes.si>.
 * xattr consolidation Copyright (c) 2004 James Morris <jmorris@redhat.com>,
 * Red Hat Inc.
 *
 */

/*
 * Extended attributes are stored on disk blocks allocated outside of
 * any inode. The i_file_acl field is then made to point to this allocated
 * block. If two inodes have an identical set of extended attributes, they
 * may share the same extended attribute block. Such sharing is detected
 * automatically by keeping a cache of recent attribute block numbers and
 * hashes over the blocks' contents in memory.
 *
 *
 * Extended attribute block layout:
 *
 *   +------------------+
 *   | header           |
 *   | entry 1          | |
 *   | entry 2          | | growing downwards
 *   | entry 3          | v
 *   | four null bytes  |
 *   | . . .            |
 *   | value 1          | ^
 *   | value 3          | | growing upwards
 *   | value 2          | |
 *   +------------------+
 *
 * The block header is followed by multiple entry descriptors. These entry
 * descriptors are variable in size, and aligned to EXT2_XATTR_PAD
 * byte boundaries. The entry descriptors are sorted by attribute name,
 * so that two extended attribute blocks can be compared efficiently.
 *
 * Attribute values are aligned to the end of the block, stored in
 * no specific order. They are also padded to EXT2_XATTR_PAD byte
 * boundaries. No additional gaps are left between them.
 *
 * Locking strategy
 * ----------------
 * EXT2_I(inode)->i_file_acl is protected by EXT2_I(inode)->xattr_sem.
 * EA blocks are only changed if they are exclusive to an inode, so
 * holding xattr_sem also means that nothing but the EA block's reference
 * count will change. Multiple writers to an EA block are synchronized
 * by the bh lock. No more than a single bh lock is held at any time
 * to avoid deadlocks.
 */
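
/*
 * For orientation, a rough sketch of the two on-disk structures this file
 * manipulates. The authoritative definitions live in xattr.h; the field
 * list below is abridged and purely illustrative:
 *
 *	struct ext2_xattr_header {
 *		__le32	h_magic;	-- EXT2_XATTR_MAGIC
 *		__le32	h_refcount;	-- number of inodes sharing the block
 *		__le32	h_blocks;	-- always 1
 *		__le32	h_hash;		-- hash over all entries
 *	};
 *
 *	struct ext2_xattr_entry {
 *		__u8	e_name_len;	-- attribute name length
 *		__u8	e_name_index;	-- handler index (user, trusted, ...)
 *		__le16	e_value_offs;	-- value offset within the block
 *		__le32	e_value_block;	-- unused, must be zero
 *		__le32	e_value_size;	-- value length in bytes
 *		__le32	e_hash;		-- hash of name and value
 *		char	e_name[];	-- name, not NUL-terminated on disk
 *	};
 */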

#include <linux/buffer_head.h>
#include <linux/init.h>
#include <linux/printk.h>
#include <linux/slab.h>
#include <linux/mbcache.h>
#include <linux/quotaops.h>
#include <linux/rwsem.h>
#include <linux/security.h>
#include "ext2.h"
#include "xattr.h"
#include "acl.h"

#define HDR(bh) ((struct ext2_xattr_header *)((bh)->b_data))
#define ENTRY(ptr) ((struct ext2_xattr_entry *)(ptr))
#define FIRST_ENTRY(bh) ENTRY(HDR(bh)+1)
#define IS_LAST_ENTRY(entry) (*(__u32 *)(entry) == 0)
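
/*
 * Illustrative use of the helpers above (a sketch, not code that is called
 * anywhere): entries are walked from FIRST_ENTRY() until the terminating
 * four null bytes are reached, advancing with EXT2_XATTR_NEXT() from
 * xattr.h:
 *
 *	struct ext2_xattr_entry *entry = FIRST_ENTRY(bh);
 *
 *	while (!IS_LAST_ENTRY(entry)) {
 *		... look at entry->e_name, entry->e_value_offs, ...
 *		entry = EXT2_XATTR_NEXT(entry);
 *	}
 */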

#ifdef EXT2_XATTR_DEBUG
# define ea_idebug(inode, f...) do { \
		printk(KERN_DEBUG "inode %s:%ld: ", \
			inode->i_sb->s_id, inode->i_ino); \
		printk(f); \
		printk("\n"); \
	} while (0)
# define ea_bdebug(bh, f...) do { \
		printk(KERN_DEBUG "block %pg:%lu: ", \
			bh->b_bdev, (unsigned long) bh->b_blocknr); \
		printk(f); \
		printk("\n"); \
	} while (0)
#else
# define ea_idebug(inode, f...)	no_printk(f)
# define ea_bdebug(bh, f...)	no_printk(f)
#endif

static int ext2_xattr_set2(struct inode *, struct buffer_head *,
			   struct ext2_xattr_header *);

static int ext2_xattr_cache_insert(struct mb_cache *, struct buffer_head *);
static struct buffer_head *ext2_xattr_cache_find(struct inode *,
						 struct ext2_xattr_header *);
static void ext2_xattr_rehash(struct ext2_xattr_header *,
			      struct ext2_xattr_entry *);

static const struct xattr_handler *ext2_xattr_handler_map[] = {
	[EXT2_XATTR_INDEX_USER]		     = &ext2_xattr_user_handler,
#ifdef CONFIG_EXT2_FS_POSIX_ACL
	[EXT2_XATTR_INDEX_POSIX_ACL_ACCESS]  = &posix_acl_access_xattr_handler,
	[EXT2_XATTR_INDEX_POSIX_ACL_DEFAULT] = &posix_acl_default_xattr_handler,
#endif
	[EXT2_XATTR_INDEX_TRUSTED]	     = &ext2_xattr_trusted_handler,
#ifdef CONFIG_EXT2_FS_SECURITY
	[EXT2_XATTR_INDEX_SECURITY]	     = &ext2_xattr_security_handler,
#endif
};

const struct xattr_handler *ext2_xattr_handlers[] = {
	&ext2_xattr_user_handler,
	&ext2_xattr_trusted_handler,
#ifdef CONFIG_EXT2_FS_POSIX_ACL
	&posix_acl_access_xattr_handler,
	&posix_acl_default_xattr_handler,
#endif
#ifdef CONFIG_EXT2_FS_SECURITY
	&ext2_xattr_security_handler,
#endif
	NULL
};

#define EA_BLOCK_CACHE(inode)	(EXT2_SB(inode->i_sb)->s_ea_block_cache)

static inline const struct xattr_handler *
ext2_xattr_handler(int name_index)
{
	const struct xattr_handler *handler = NULL;

	if (name_index > 0 && name_index < ARRAY_SIZE(ext2_xattr_handler_map))
		handler = ext2_xattr_handler_map[name_index];
	return handler;
}

static bool
ext2_xattr_header_valid(struct ext2_xattr_header *header)
{
	if (header->h_magic != cpu_to_le32(EXT2_XATTR_MAGIC) ||
	    header->h_blocks != cpu_to_le32(1))
		return false;

	return true;
}

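/*
 * Sanity-check a single entry descriptor against the containing block:
 * the next descriptor must still lie inside the block, e_value_block must
 * be zero (values always live in the same block as the entry), and the
 * value itself must fit below end_offs (the block size).
 */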
static bool
ext2_xattr_entry_valid(struct ext2_xattr_entry *entry,
		       char *end, size_t end_offs)
{
	struct ext2_xattr_entry *next;
	size_t size;

	next = EXT2_XATTR_NEXT(entry);
	if ((char *)next >= end)
		return false;

	if (entry->e_value_block != 0)
		return false;

	size = le32_to_cpu(entry->e_value_size);
	if (size > end_offs ||
	    le16_to_cpu(entry->e_value_offs) + size > end_offs)
		return false;

	return true;
}

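/*
 * Compare a (name_index, name) lookup key against an on-disk entry.
 * Returns 0 on a match; otherwise the sign of the result follows the sort
 * order of the entry descriptors: first by name index, then by name
 * length, then by the name bytes themselves.
 */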
static int
ext2_xattr_cmp_entry(int name_index, size_t name_len, const char *name,
		     struct ext2_xattr_entry *entry)
{
	int cmp;

	cmp = name_index - entry->e_name_index;
	if (!cmp)
		cmp = name_len - entry->e_name_len;
	if (!cmp)
		cmp = memcmp(name, entry->e_name, name_len);

	return cmp;
}

/*
 * ext2_xattr_get()
 *
 * Copy an extended attribute into the buffer provided. If buffer is
 * NULL, compute and return the size of the buffer required instead.
 *
 * Returns a negative error number on failure, or the number of bytes
 * used / required on success.
 */
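
/*
 * Typical calling pattern (a sketch only; the real callers are the
 * per-prefix handlers such as the ones in xattr_user.c, and "foo"/buf
 * below are purely illustrative): probe for the size with a NULL buffer,
 * then fetch the value into a buffer of that size.
 *
 *	int size = ext2_xattr_get(inode, EXT2_XATTR_INDEX_USER,
 *				  "foo", NULL, 0);
 *	if (size >= 0)
 *		size = ext2_xattr_get(inode, EXT2_XATTR_INDEX_USER,
 *				      "foo", buf, size);
 */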
int
ext2_xattr_get(struct inode *inode, int name_index, const char *name,
	       void *buffer, size_t buffer_size)
{
	struct buffer_head *bh = NULL;
	struct ext2_xattr_entry *entry;
	size_t name_len, size;
	char *end;
	int error, not_found;
	struct mb_cache *ea_block_cache = EA_BLOCK_CACHE(inode);

	ea_idebug(inode, "name=%d.%s, buffer=%p, buffer_size=%ld",
		  name_index, name, buffer, (long)buffer_size);

	if (name == NULL)
		return -EINVAL;
	name_len = strlen(name);
	if (name_len > 255)
		return -ERANGE;

	down_read(&EXT2_I(inode)->xattr_sem);
	error = -ENODATA;
	if (!EXT2_I(inode)->i_file_acl)
		goto cleanup;
	ea_idebug(inode, "reading block %d", EXT2_I(inode)->i_file_acl);
	bh = sb_bread(inode->i_sb, EXT2_I(inode)->i_file_acl);
	error = -EIO;
	if (!bh)
		goto cleanup;
	ea_bdebug(bh, "b_count=%d, refcount=%d",
		atomic_read(&(bh->b_count)), le32_to_cpu(HDR(bh)->h_refcount));
	end = bh->b_data + bh->b_size;
	if (!ext2_xattr_header_valid(HDR(bh))) {
bad_block:
		ext2_error(inode->i_sb, "ext2_xattr_get",
			"inode %ld: bad block %d", inode->i_ino,
			EXT2_I(inode)->i_file_acl);
		error = -EIO;
		goto cleanup;
	}

	/* find named attribute */
	entry = FIRST_ENTRY(bh);
	while (!IS_LAST_ENTRY(entry)) {
		if (!ext2_xattr_entry_valid(entry, end,
		    inode->i_sb->s_blocksize))
			goto bad_block;

		not_found = ext2_xattr_cmp_entry(name_index, name_len, name,
						 entry);
		if (!not_found)
			goto found;
		if (not_found < 0)
			break;

		entry = EXT2_XATTR_NEXT(entry);
	}
	if (ext2_xattr_cache_insert(ea_block_cache, bh))
		ea_idebug(inode, "cache insert failed");
	error = -ENODATA;
	goto cleanup;
found:
	size = le32_to_cpu(entry->e_value_size);
	if (ext2_xattr_cache_insert(ea_block_cache, bh))
		ea_idebug(inode, "cache insert failed");
	if (buffer) {
		error = -ERANGE;
		if (size > buffer_size)
			goto cleanup;
		/* return value of attribute */
		memcpy(buffer, bh->b_data + le16_to_cpu(entry->e_value_offs),
		       size);
	}
	error = size;

cleanup:
	brelse(bh);
	up_read(&EXT2_I(inode)->xattr_sem);

	return error;
}

/*
 * ext2_xattr_list()
 *
 * Copy a list of attribute names into the buffer provided. If buffer is
 * NULL, compute and return the size of the buffer required instead.
 *
 * Returns a negative error number on failure, or the number of bytes
 * used / required on success.
 */
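
/*
 * The returned list uses the usual listxattr(2) format: each attribute
 * contributes "<prefix><name>\0" (for example "user.foo\0"), i.e. the
 * handler's prefix is prepended to the on-disk name and entries are
 * separated by NUL bytes.
 */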
static int
ext2_xattr_list(struct dentry *dentry, char *buffer, size_t buffer_size)
{
	struct inode *inode = d_inode(dentry);
	struct buffer_head *bh = NULL;
	struct ext2_xattr_entry *entry;
	char *end;
	size_t rest = buffer_size;
	int error;
	struct mb_cache *ea_block_cache = EA_BLOCK_CACHE(inode);

	ea_idebug(inode, "buffer=%p, buffer_size=%ld",
		  buffer, (long)buffer_size);

	down_read(&EXT2_I(inode)->xattr_sem);
	error = 0;
	if (!EXT2_I(inode)->i_file_acl)
		goto cleanup;
	ea_idebug(inode, "reading block %d", EXT2_I(inode)->i_file_acl);
	bh = sb_bread(inode->i_sb, EXT2_I(inode)->i_file_acl);
	error = -EIO;
	if (!bh)
		goto cleanup;
	ea_bdebug(bh, "b_count=%d, refcount=%d",
		atomic_read(&(bh->b_count)), le32_to_cpu(HDR(bh)->h_refcount));
	end = bh->b_data + bh->b_size;
	if (!ext2_xattr_header_valid(HDR(bh))) {
bad_block:
		ext2_error(inode->i_sb, "ext2_xattr_list",
			"inode %ld: bad block %d", inode->i_ino,
			EXT2_I(inode)->i_file_acl);
		error = -EIO;
		goto cleanup;
	}

	/* check the on-disk data structure */
	entry = FIRST_ENTRY(bh);
	while (!IS_LAST_ENTRY(entry)) {
		if (!ext2_xattr_entry_valid(entry, end,
		    inode->i_sb->s_blocksize))
			goto bad_block;
		entry = EXT2_XATTR_NEXT(entry);
	}
	if (ext2_xattr_cache_insert(ea_block_cache, bh))
		ea_idebug(inode, "cache insert failed");

	/* list the attribute names */
	for (entry = FIRST_ENTRY(bh); !IS_LAST_ENTRY(entry);
	     entry = EXT2_XATTR_NEXT(entry)) {
		const struct xattr_handler *handler =
			ext2_xattr_handler(entry->e_name_index);

		if (handler && (!handler->list || handler->list(dentry))) {
			const char *prefix = handler->prefix ?: handler->name;
			size_t prefix_len = strlen(prefix);
			size_t size = prefix_len + entry->e_name_len + 1;

			if (buffer) {
				if (size > rest) {
					error = -ERANGE;
					goto cleanup;
				}
				memcpy(buffer, prefix, prefix_len);
				buffer += prefix_len;
				memcpy(buffer, entry->e_name, entry->e_name_len);
				buffer += entry->e_name_len;
				*buffer++ = 0;
			}
			rest -= size;
		}
	}
	error = buffer_size - rest;  /* total size */

cleanup:
	brelse(bh);
	up_read(&EXT2_I(inode)->xattr_sem);

	return error;
}

/*
 * Inode operation listxattr()
 *
 * d_inode(dentry)->i_mutex: don't care
 */
ssize_t
ext2_listxattr(struct dentry *dentry, char *buffer, size_t size)
{
	return ext2_xattr_list(dentry, buffer, size);
}

/*
 * If the EXT2_FEATURE_COMPAT_EXT_ATTR feature of this file system is
 * not set, set it.
 */
static void ext2_xattr_update_super_block(struct super_block *sb)
{
	if (EXT2_HAS_COMPAT_FEATURE(sb, EXT2_FEATURE_COMPAT_EXT_ATTR))
		return;

	spin_lock(&EXT2_SB(sb)->s_lock);
	ext2_update_dynamic_rev(sb);
	EXT2_SET_COMPAT_FEATURE(sb, EXT2_FEATURE_COMPAT_EXT_ATTR);
	spin_unlock(&EXT2_SB(sb)->s_lock);
	mark_buffer_dirty(EXT2_SB(sb)->s_sbh);
}

/*
 * ext2_xattr_set()
 *
 * Create, replace or remove an extended attribute for this inode.  Value
 * is NULL to remove an existing extended attribute, and non-NULL to
 * either replace an existing extended attribute, or create a new extended
 * attribute. The flags XATTR_REPLACE and XATTR_CREATE
 * specify that an extended attribute must exist and must not exist
 * previous to the call, respectively.
 *
 * Returns 0, or a negative error number on failure.
 */
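
/*
 * Flag semantics in brief (these follow from the checks further down):
 *
 *	flags == 0		create or replace, whichever applies
 *	flags & XATTR_CREATE	fail with -EEXIST if the attribute exists
 *	flags & XATTR_REPLACE	fail with -ENODATA if it does not exist
 *	value == NULL		remove the attribute (a no-op if it is
 *				absent and XATTR_REPLACE is not given)
 */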
int
ext2_xattr_set(struct inode *inode, int name_index, const char *name,
	       const void *value, size_t value_len, int flags)
{
	struct super_block *sb = inode->i_sb;
	struct buffer_head *bh = NULL;
	struct ext2_xattr_header *header = NULL;
	struct ext2_xattr_entry *here = NULL, *last = NULL;
	size_t name_len, free, min_offs = sb->s_blocksize;
	int not_found = 1, error;
	char *end;

	/*
	 * header -- Points either into bh, or to a temporarily
	 *           allocated buffer.
	 * here -- The named entry found, or the place for inserting, within
	 *         the block pointed to by header.
	 * last -- Points right after the last named entry within the block
	 *         pointed to by header.
	 * min_offs -- The offset of the first value (values are aligned
	 *             towards the end of the block).
	 * end -- Points right after the block pointed to by header.
	 */

	ea_idebug(inode, "name=%d.%s, value=%p, value_len=%ld",
		  name_index, name, value, (long)value_len);

	if (value == NULL)
		value_len = 0;
	if (name == NULL)
		return -EINVAL;
	name_len = strlen(name);
	if (name_len > 255 || value_len > sb->s_blocksize)
		return -ERANGE;
	error = dquot_initialize(inode);
	if (error)
		return error;
	down_write(&EXT2_I(inode)->xattr_sem);
	if (EXT2_I(inode)->i_file_acl) {
		/* The inode already has an extended attribute block. */
		bh = sb_bread(sb, EXT2_I(inode)->i_file_acl);
		error = -EIO;
		if (!bh)
			goto cleanup;
		ea_bdebug(bh, "b_count=%d, refcount=%d",
			atomic_read(&(bh->b_count)),
			le32_to_cpu(HDR(bh)->h_refcount));
		header = HDR(bh);
		end = bh->b_data + bh->b_size;
		if (!ext2_xattr_header_valid(header)) {
bad_block:
			ext2_error(sb, "ext2_xattr_set",
				"inode %ld: bad block %d", inode->i_ino,
				EXT2_I(inode)->i_file_acl);
			error = -EIO;
			goto cleanup;
		}
		/*
		 * Find the named attribute. If it is not found, 'here' will
		 * point to the entry where the new attribute should be
		 * inserted to maintain sorting.
		 */
		last = FIRST_ENTRY(bh);
		while (!IS_LAST_ENTRY(last)) {
			if (!ext2_xattr_entry_valid(last, end, sb->s_blocksize))
				goto bad_block;
			if (last->e_value_size) {
				size_t offs = le16_to_cpu(last->e_value_offs);
				if (offs < min_offs)
					min_offs = offs;
			}
			if (not_found > 0) {
				not_found = ext2_xattr_cmp_entry(name_index,
								 name_len,
								 name, last);
				if (not_found <= 0)
					here = last;
			}
			last = EXT2_XATTR_NEXT(last);
		}
		if (not_found > 0)
			here = last;

		/* Check whether we have enough space left. */
		free = min_offs - ((char*)last - (char*)header) - sizeof(__u32);
	} else {
		/* We will use a new extended attribute block. */
		free = sb->s_blocksize -
			sizeof(struct ext2_xattr_header) - sizeof(__u32);
	}
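
	/*
	 * Worked example of the space accounting above (numbers assumed for
	 * illustration): with a 4096-byte block whose lowest value starts at
	 * offset 3000, free = 3000 - (offset of the end of the entry
	 * descriptors) - 4 bytes reserved for the terminating null entry.
	 * The new attribute fits if EXT2_XATTR_LEN(name_len) +
	 * EXT2_XATTR_SIZE(value_len) does not exceed that figure, which is
	 * exactly the -ENOSPC check below.
	 */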

	if (not_found) {
		/* Request to remove a nonexistent attribute? */
		error = -ENODATA;
		if (flags & XATTR_REPLACE)
			goto cleanup;
		error = 0;
		if (value == NULL)
			goto cleanup;
	} else {
		/* Request to create an existing attribute? */
		error = -EEXIST;
		if (flags & XATTR_CREATE)
			goto cleanup;
		free += EXT2_XATTR_SIZE(le32_to_cpu(here->e_value_size));
		free += EXT2_XATTR_LEN(name_len);
	}
	error = -ENOSPC;
	if (free < EXT2_XATTR_LEN(name_len) + EXT2_XATTR_SIZE(value_len))
		goto cleanup;

	/* Here we know that we can set the new attribute. */

	if (header) {
		/* assert(header == HDR(bh)); */
		lock_buffer(bh);
		if (header->h_refcount == cpu_to_le32(1)) {
			__u32 hash = le32_to_cpu(header->h_hash);

			ea_bdebug(bh, "modifying in-place");
			/*
			 * This must happen under buffer lock for
			 * ext2_xattr_set2() to reliably detect modified block
			 */
			mb_cache_entry_delete(EA_BLOCK_CACHE(inode), hash,
					      bh->b_blocknr);

			/* keep the buffer locked while modifying it. */
		} else {
			int offset;

			unlock_buffer(bh);
			ea_bdebug(bh, "cloning");
			header = kmemdup(HDR(bh), bh->b_size, GFP_KERNEL);
			error = -ENOMEM;
			if (header == NULL)
				goto cleanup;
			header->h_refcount = cpu_to_le32(1);

			offset = (char *)here - bh->b_data;
			here = ENTRY((char *)header + offset);
			offset = (char *)last - bh->b_data;
			last = ENTRY((char *)header + offset);
		}
	} else {
		/* Allocate a buffer where we construct the new block. */
		header = kzalloc(sb->s_blocksize, GFP_KERNEL);
		error = -ENOMEM;
		if (header == NULL)
			goto cleanup;
		end = (char *)header + sb->s_blocksize;
		header->h_magic = cpu_to_le32(EXT2_XATTR_MAGIC);
		header->h_blocks = header->h_refcount = cpu_to_le32(1);
		last = here = ENTRY(header+1);
	}

	/* Iff we are modifying the block in-place, bh is locked here. */

	if (not_found) {
		/* Insert the new name. */
		size_t size = EXT2_XATTR_LEN(name_len);
		size_t rest = (char *)last - (char *)here;
		memmove((char *)here + size, here, rest);
		memset(here, 0, size);
		here->e_name_index = name_index;
		here->e_name_len = name_len;
		memcpy(here->e_name, name, name_len);
	} else {
		if (here->e_value_size) {
			char *first_val = (char *)header + min_offs;
			size_t offs = le16_to_cpu(here->e_value_offs);
			char *val = (char *)header + offs;
			size_t size = EXT2_XATTR_SIZE(
				le32_to_cpu(here->e_value_size));

			if (size == EXT2_XATTR_SIZE(value_len)) {
				/* The old and the new value have the same
				   size. Just replace. */
				here->e_value_size = cpu_to_le32(value_len);
				memset(val + size - EXT2_XATTR_PAD, 0,
				       EXT2_XATTR_PAD); /* Clear pad bytes. */
				memcpy(val, value, value_len);
				goto skip_replace;
			}

			/* Remove the old value. */
			memmove(first_val + size, first_val, val - first_val);
			memset(first_val, 0, size);
			min_offs += size;

			/* Adjust all value offsets. */
			last = ENTRY(header+1);
			while (!IS_LAST_ENTRY(last)) {
				size_t o = le16_to_cpu(last->e_value_offs);
				if (o < offs)
					last->e_value_offs =
						cpu_to_le16(o + size);
				last = EXT2_XATTR_NEXT(last);
			}

			here->e_value_offs = 0;
		}
		if (value == NULL) {
			/* Remove the old name. */
			size_t size = EXT2_XATTR_LEN(name_len);
			last = ENTRY((char *)last - size);
			memmove(here, (char*)here + size,
				(char*)last - (char*)here);
			memset(last, 0, size);
		}
	}

	if (value != NULL) {
		/* Insert the new value. */
		here->e_value_size = cpu_to_le32(value_len);
		if (value_len) {
			size_t size = EXT2_XATTR_SIZE(value_len);
			char *val = (char *)header + min_offs - size;
			here->e_value_offs =
				cpu_to_le16((char *)val - (char *)header);
			memset(val + size - EXT2_XATTR_PAD, 0,
			       EXT2_XATTR_PAD); /* Clear the pad bytes. */
			memcpy(val, value, value_len);
		}
	}

skip_replace:
	if (IS_LAST_ENTRY(ENTRY(header+1))) {
		/* This block is now empty. */
		if (bh && header == HDR(bh))
			unlock_buffer(bh); /* we were modifying in-place. */
		error = ext2_xattr_set2(inode, bh, NULL);
	} else {
		ext2_xattr_rehash(header, here);
		if (bh && header == HDR(bh))
			unlock_buffer(bh); /* we were modifying in-place. */
		error = ext2_xattr_set2(inode, bh, header);
	}

cleanup:
	if (!(bh && header == HDR(bh)))
		kfree(header);
	brelse(bh);
	up_write(&EXT2_I(inode)->xattr_sem);

	return error;
}

/*
 * Second half of ext2_xattr_set(): Update the file system.
 */
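
/*
 * Three outcomes are possible here: an identical block already exists in
 * the mbcache and is shared by bumping its refcount; the old block
 * (modified in place by the caller) is kept; or a brand new block is
 * allocated and the candidate header is copied into it. In every case the
 * inode's i_file_acl is updated afterwards, and the previous block, if it
 * is no longer used, is freed or has its refcount dropped.
 */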
static int
ext2_xattr_set2(struct inode *inode, struct buffer_head *old_bh,
		struct ext2_xattr_header *header)
{
	struct super_block *sb = inode->i_sb;
	struct buffer_head *new_bh = NULL;
	int error;
	struct mb_cache *ea_block_cache = EA_BLOCK_CACHE(inode);

	if (header) {
		new_bh = ext2_xattr_cache_find(inode, header);
		if (new_bh) {
			/* We found an identical block in the cache. */
			if (new_bh == old_bh) {
				ea_bdebug(new_bh, "keeping this block");
			} else {
				/* The old block is released after updating
				   the inode. */
				ea_bdebug(new_bh, "reusing block");

				error = dquot_alloc_block(inode, 1);
				if (error) {
					unlock_buffer(new_bh);
					goto cleanup;
				}
				le32_add_cpu(&HDR(new_bh)->h_refcount, 1);
				ea_bdebug(new_bh, "refcount now=%d",
					le32_to_cpu(HDR(new_bh)->h_refcount));
			}
			unlock_buffer(new_bh);
		} else if (old_bh && header == HDR(old_bh)) {
			/* Keep this block. No need to lock the block as we
			   don't need to change the reference count. */
			new_bh = old_bh;
			get_bh(new_bh);
			ext2_xattr_cache_insert(ea_block_cache, new_bh);
		} else {
			/* We need to allocate a new block */
			ext2_fsblk_t goal = ext2_group_first_block_no(sb,
						EXT2_I(inode)->i_block_group);
			int block = ext2_new_block(inode, goal, &error);
			if (error)
				goto cleanup;
			ea_idebug(inode, "creating block %d", block);

			new_bh = sb_getblk(sb, block);
			if (unlikely(!new_bh)) {
				ext2_free_blocks(inode, block, 1);
				mark_inode_dirty(inode);
				error = -ENOMEM;
				goto cleanup;
			}
			lock_buffer(new_bh);
			memcpy(new_bh->b_data, header, new_bh->b_size);
			set_buffer_uptodate(new_bh);
			unlock_buffer(new_bh);
			ext2_xattr_cache_insert(ea_block_cache, new_bh);

			ext2_xattr_update_super_block(sb);
		}
		mark_buffer_dirty(new_bh);
		if (IS_SYNC(inode)) {
			sync_dirty_buffer(new_bh);
			error = -EIO;
			if (buffer_req(new_bh) && !buffer_uptodate(new_bh))
				goto cleanup;
		}
	}

	/* Update the inode. */
	EXT2_I(inode)->i_file_acl = new_bh ? new_bh->b_blocknr : 0;
	inode->i_ctime = current_time(inode);
	if (IS_SYNC(inode)) {
		error = sync_inode_metadata(inode, 1);
		/* In case sync failed due to ENOSPC the inode was actually
		 * written (only some dirty data was not), so we just proceed
		 * as if nothing happened and clean up the unused block. */
		if (error && error != -ENOSPC) {
			if (new_bh && new_bh != old_bh) {
				dquot_free_block_nodirty(inode, 1);
				mark_inode_dirty(inode);
			}
			goto cleanup;
		}
	} else
		mark_inode_dirty(inode);

	error = 0;
	if (old_bh && old_bh != new_bh) {
		/*
		 * If there was an old block and we are no longer using it,
		 * release the old block.
		 */
		lock_buffer(old_bh);
		if (HDR(old_bh)->h_refcount == cpu_to_le32(1)) {
			__u32 hash = le32_to_cpu(HDR(old_bh)->h_hash);

			/*
			 * This must happen under buffer lock for
			 * ext2_xattr_set2() to reliably detect freed block
			 */
			mb_cache_entry_delete(ea_block_cache, hash,
					      old_bh->b_blocknr);
			/* Free the old block. */
			ea_bdebug(old_bh, "freeing");
			ext2_free_blocks(inode, old_bh->b_blocknr, 1);
			mark_inode_dirty(inode);
			/* We let our caller release old_bh, so we
			 * need to duplicate the buffer first. */
			get_bh(old_bh);
			bforget(old_bh);
		} else {
			/* Decrement the refcount only. */
			le32_add_cpu(&HDR(old_bh)->h_refcount, -1);
			dquot_free_block_nodirty(inode, 1);
			mark_inode_dirty(inode);
			mark_buffer_dirty(old_bh);
			ea_bdebug(old_bh, "refcount now=%d",
				le32_to_cpu(HDR(old_bh)->h_refcount));
		}
		unlock_buffer(old_bh);
	}

cleanup:
	brelse(new_bh);

	return error;
}

/*
 * ext2_xattr_delete_inode()
 *
 * Free extended attribute resources associated with this inode. This
 * is called immediately before an inode is freed.
 */
void
ext2_xattr_delete_inode(struct inode *inode)
{
	struct buffer_head *bh = NULL;
	struct ext2_sb_info *sbi = EXT2_SB(inode->i_sb);

^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 798) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 799) 	 * We are the only ones holding an inode reference, so xattr_sem had
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 800) 	 * better be unlocked! We could just as well not take xattr_sem at all,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 801) 	 * but holding it keeps the code futureproof. OTOH we need trylock here
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 802) 	 * to avoid a false-positive lockdep warning about a circular
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 803) 	 * dependency with reclaim.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 804) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 805) if (WARN_ON_ONCE(!down_write_trylock(&EXT2_I(inode)->xattr_sem)))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 806) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 807) if (!EXT2_I(inode)->i_file_acl)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 808) goto cleanup;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 809)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 810) if (!ext2_data_block_valid(sbi, EXT2_I(inode)->i_file_acl, 1)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 811) ext2_error(inode->i_sb, "ext2_xattr_delete_inode",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 812) "inode %ld: xattr block %d is out of data blocks range",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 813) inode->i_ino, EXT2_I(inode)->i_file_acl);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 814) goto cleanup;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 815) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 816)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 817) bh = sb_bread(inode->i_sb, EXT2_I(inode)->i_file_acl);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 818) if (!bh) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 819) ext2_error(inode->i_sb, "ext2_xattr_delete_inode",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 820) "inode %ld: block %d read error", inode->i_ino,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 821) EXT2_I(inode)->i_file_acl);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 822) goto cleanup;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 823) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 824) ea_bdebug(bh, "b_count=%d", atomic_read(&(bh->b_count)));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 825) if (!ext2_xattr_header_valid(HDR(bh))) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 826) ext2_error(inode->i_sb, "ext2_xattr_delete_inode",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 827) "inode %ld: bad block %d", inode->i_ino,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 828) EXT2_I(inode)->i_file_acl);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 829) goto cleanup;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 830) }
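	/*
	 * Under the buffer lock, either free the block outright (we were its
	 * last user) or just drop one reference from its refcount.
	 */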
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 831) lock_buffer(bh);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 832) if (HDR(bh)->h_refcount == cpu_to_le32(1)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 833) __u32 hash = le32_to_cpu(HDR(bh)->h_hash);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 834)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 835) 		/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 836) 		 * This must happen under the buffer lock for ext2_xattr_set2()
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 837) 		 * to reliably detect the freed block.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 838) 		 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 839) mb_cache_entry_delete(EA_BLOCK_CACHE(inode), hash,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 840) bh->b_blocknr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 841) ext2_free_blocks(inode, EXT2_I(inode)->i_file_acl, 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 842) get_bh(bh);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 843) bforget(bh);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 844) unlock_buffer(bh);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 845) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 846) le32_add_cpu(&HDR(bh)->h_refcount, -1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 847) ea_bdebug(bh, "refcount now=%d",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 848) le32_to_cpu(HDR(bh)->h_refcount));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 849) unlock_buffer(bh);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 850) mark_buffer_dirty(bh);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 851) if (IS_SYNC(inode))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 852) sync_dirty_buffer(bh);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 853) dquot_free_block_nodirty(inode, 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 854) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 855) EXT2_I(inode)->i_file_acl = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 856)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 857) cleanup:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 858) brelse(bh);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 859) up_write(&EXT2_I(inode)->xattr_sem);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 860) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 861)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 862) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 863) * ext2_xattr_cache_insert()
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 864) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 865)  * Insert an entry for this extended attribute block into the block
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 866)  * cache, unless an entry for the block is already present.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 867) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 868) * Returns 0, or a negative error number on failure.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 869) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 870) static int
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 871) ext2_xattr_cache_insert(struct mb_cache *cache, struct buffer_head *bh)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 872) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 873) __u32 hash = le32_to_cpu(HDR(bh)->h_hash);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 874) int error;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 875)
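	/*
	 * The last argument marks the entry as reusable, i.e. lookups done
	 * for block sharing (ext2_xattr_cache_find()) may return it.
	 */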
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 876) error = mb_cache_entry_create(cache, GFP_NOFS, hash, bh->b_blocknr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 877) true);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 878) if (error) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 879) if (error == -EBUSY) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 880) ea_bdebug(bh, "already in cache");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 881) error = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 882) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 883) } else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 884) ea_bdebug(bh, "inserting [%x]", (int)hash);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 885) return error;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 886) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 887)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 888) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 889) * ext2_xattr_cmp()
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 890) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 891) * Compare two extended attribute blocks for equality.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 892) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 893) * Returns 0 if the blocks are equal, 1 if they differ, and
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 894) * a negative error number on errors.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 895) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 896) static int
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 897) ext2_xattr_cmp(struct ext2_xattr_header *header1,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 898) struct ext2_xattr_header *header2)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 899) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 900) struct ext2_xattr_entry *entry1, *entry2;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 901)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 902) entry1 = ENTRY(header1+1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 903) entry2 = ENTRY(header2+1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 904) while (!IS_LAST_ENTRY(entry1)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 905) if (IS_LAST_ENTRY(entry2))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 906) return 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 907) if (entry1->e_hash != entry2->e_hash ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 908) entry1->e_name_index != entry2->e_name_index ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 909) entry1->e_name_len != entry2->e_name_len ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 910) entry1->e_value_size != entry2->e_value_size ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 911) memcmp(entry1->e_name, entry2->e_name, entry1->e_name_len))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 912) return 1;
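		/*
		 * ext2 keeps attribute values inside the xattr block itself,
		 * so a nonzero e_value_block indicates a corrupted block.
		 */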
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 913) if (entry1->e_value_block != 0 || entry2->e_value_block != 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 914) return -EIO;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 915) if (memcmp((char *)header1 + le16_to_cpu(entry1->e_value_offs),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 916) (char *)header2 + le16_to_cpu(entry2->e_value_offs),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 917) le32_to_cpu(entry1->e_value_size)))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 918) return 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 919)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 920) entry1 = EXT2_XATTR_NEXT(entry1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 921) entry2 = EXT2_XATTR_NEXT(entry2);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 922) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 923) if (!IS_LAST_ENTRY(entry2))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 924) return 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 925) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 926) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 927)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 928) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 929) * ext2_xattr_cache_find()
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 930) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 931) * Find an identical extended attribute block.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 932) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 933) * Returns a locked buffer head to the block found, or NULL if such
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 934) * a block was not found or an error occurred.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 935) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 936) static struct buffer_head *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 937) ext2_xattr_cache_find(struct inode *inode, struct ext2_xattr_header *header)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 938) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 939) __u32 hash = le32_to_cpu(header->h_hash);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 940) struct mb_cache_entry *ce;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 941) struct mb_cache *ea_block_cache = EA_BLOCK_CACHE(inode);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 942)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 943) if (!header->h_hash)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 944) return NULL; /* never share */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 945) ea_idebug(inode, "looking for cached blocks [%x]", (int)hash);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 946) again:
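	/*
	 * Walk all cache entries with this hash. For each candidate, read
	 * the block and check under its buffer lock that it is still live,
	 * below the refcount limit, and actually identical before reusing it.
	 */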
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 947) ce = mb_cache_entry_find_first(ea_block_cache, hash);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 948) while (ce) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 949) struct buffer_head *bh;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 950)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 951) bh = sb_bread(inode->i_sb, ce->e_value);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 952) if (!bh) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 953) ext2_error(inode->i_sb, "ext2_xattr_cache_find",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 954) "inode %ld: block %ld read error",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 955) inode->i_ino, (unsigned long) ce->e_value);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 956) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 957) lock_buffer(bh);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 958) 			/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 959) 			 * We have to be careful about races with freeing or
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 960) 			 * rehashing of the xattr block. Once we hold the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 961) 			 * buffer lock, the block's state is stable, so we can
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 962) 			 * check whether it got freed or rehashed. Since we
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 963) 			 * unhash the mbcache entry under the buffer lock when
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 964) 			 * freeing or rehashing an xattr block, checking
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 965) 			 * whether the entry is still hashed is reliable.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 966) 			 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 967) if (hlist_bl_unhashed(&ce->e_hash_list)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 968) mb_cache_entry_put(ea_block_cache, ce);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 969) unlock_buffer(bh);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 970) brelse(bh);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 971) goto again;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 972) } else if (le32_to_cpu(HDR(bh)->h_refcount) >
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 973) EXT2_XATTR_REFCOUNT_MAX) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 974) ea_idebug(inode, "block %ld refcount %d>%d",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 975) (unsigned long) ce->e_value,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 976) le32_to_cpu(HDR(bh)->h_refcount),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 977) EXT2_XATTR_REFCOUNT_MAX);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 978) } else if (!ext2_xattr_cmp(header, HDR(bh))) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 979) ea_bdebug(bh, "b_count=%d",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 980) atomic_read(&(bh->b_count)));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 981) mb_cache_entry_touch(ea_block_cache, ce);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 982) mb_cache_entry_put(ea_block_cache, ce);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 983) return bh;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 984) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 985) unlock_buffer(bh);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 986) brelse(bh);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 987) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 988) ce = mb_cache_entry_find_next(ea_block_cache, ce);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 989) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 990) return NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 991) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 992)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 993) #define NAME_HASH_SHIFT 5
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 994) #define VALUE_HASH_SHIFT 16
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 995)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 996) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 997) * ext2_xattr_hash_entry()
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 998) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 999) * Compute the hash of an extended attribute.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1000) */
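/*
 * The name is mixed in one byte at a time using a 5-bit rotate-and-xor;
 * the (padded) value is then mixed in as little-endian 32-bit words using
 * a 16-bit rotate-and-xor.
 */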
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1001) static inline void ext2_xattr_hash_entry(struct ext2_xattr_header *header,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1002) struct ext2_xattr_entry *entry)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1003) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1004) __u32 hash = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1005) char *name = entry->e_name;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1006) int n;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1007)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1008) for (n=0; n < entry->e_name_len; n++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1009) hash = (hash << NAME_HASH_SHIFT) ^
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1010) (hash >> (8*sizeof(hash) - NAME_HASH_SHIFT)) ^
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1011) *name++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1012) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1013)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1014) if (entry->e_value_block == 0 && entry->e_value_size != 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1015) __le32 *value = (__le32 *)((char *)header +
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1016) le16_to_cpu(entry->e_value_offs));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1017) for (n = (le32_to_cpu(entry->e_value_size) +
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1018) EXT2_XATTR_ROUND) >> EXT2_XATTR_PAD_BITS; n; n--) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1019) hash = (hash << VALUE_HASH_SHIFT) ^
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1020) (hash >> (8*sizeof(hash) - VALUE_HASH_SHIFT)) ^
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1021) le32_to_cpu(*value++);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1022) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1023) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1024) entry->e_hash = cpu_to_le32(hash);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1025) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1026)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1027) #undef NAME_HASH_SHIFT
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1028) #undef VALUE_HASH_SHIFT
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1029)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1030) #define BLOCK_HASH_SHIFT 16
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1031)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1032) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1033) * ext2_xattr_rehash()
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1034) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1035) * Re-compute the extended attribute hash value after an entry has changed.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1036) */
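/*
 * The changed entry is rehashed first; the block hash is then the fold of
 * all entry hashes using a 16-bit rotate-and-xor. An entry with a zero hash
 * forces the block hash to zero, which prevents the block from being shared.
 */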
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1037) static void ext2_xattr_rehash(struct ext2_xattr_header *header,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1038) struct ext2_xattr_entry *entry)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1039) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1040) struct ext2_xattr_entry *here;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1041) __u32 hash = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1042)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1043) ext2_xattr_hash_entry(header, entry);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1044) here = ENTRY(header+1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1045) while (!IS_LAST_ENTRY(here)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1046) if (!here->e_hash) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1047) /* Block is not shared if an entry's hash value == 0 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1048) hash = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1049) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1050) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1051) hash = (hash << BLOCK_HASH_SHIFT) ^
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1052) (hash >> (8*sizeof(hash) - BLOCK_HASH_SHIFT)) ^
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1053) le32_to_cpu(here->e_hash);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1054) here = EXT2_XATTR_NEXT(here);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1055) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1056) header->h_hash = cpu_to_le32(hash);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1057) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1058)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1059) #undef BLOCK_HASH_SHIFT
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1060)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1061) #define HASH_BUCKET_BITS 10
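/* The extended attribute block cache is created with 2^HASH_BUCKET_BITS hash buckets. */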
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1062)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1063) struct mb_cache *ext2_xattr_create_cache(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1064) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1065) return mb_cache_create(HASH_BUCKET_BITS);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1066) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1067)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1068) void ext2_xattr_destroy_cache(struct mb_cache *cache)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1069) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1070) if (cache)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1071) mb_cache_destroy(cache);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1072) }