// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Copyright (C) 2001 Momchil Velikov
 * Portions Copyright (C) 2001 Christoph Hellwig
 * Copyright (C) 2005 SGI, Christoph Lameter
 * Copyright (C) 2006 Nick Piggin
 * Copyright (C) 2012 Konstantin Khlebnikov
 * Copyright (C) 2016 Intel, Matthew Wilcox
 * Copyright (C) 2016 Intel, Ross Zwisler
 */

#include <linux/bitmap.h>
#include <linux/bitops.h>
#include <linux/bug.h>
#include <linux/cpu.h>
#include <linux/errno.h>
#include <linux/export.h>
#include <linux/idr.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/kmemleak.h>
#include <linux/percpu.h>
#include <linux/preempt.h>	/* in_interrupt() */
#include <linux/radix-tree.h>
#include <linux/rcupdate.h>
#include <linux/slab.h>
#include <linux/string.h>
#include <linux/xarray.h>

/*
 * Radix tree node cache.
 */
struct kmem_cache *radix_tree_node_cachep;

/*
 * The radix tree is variable-height, so an insert operation not only has
 * to build the branch to its corresponding item, it may also have to
 * build branches to existing items if the height has to be increased
 * (by radix_tree_extend).
 *
 * The worst case is a zero height tree with just a single item at index 0,
 * and then inserting an item at index ULONG_MAX. This requires 2 new branches
 * of RADIX_TREE_MAX_PATH size to be created, with only the root node shared.
 * Hence:
 */
#define RADIX_TREE_PRELOAD_SIZE (RADIX_TREE_MAX_PATH * 2 - 1)
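
/*
 * Worked example (a sketch, assuming a 64-bit machine with
 * RADIX_TREE_MAP_SHIFT == 6): RADIX_TREE_MAX_PATH = DIV_ROUND_UP(64, 6) = 11,
 * so the worst case above needs two full paths minus the shared root node:
 * RADIX_TREE_PRELOAD_SIZE = 11 * 2 - 1 = 21 preallocated nodes.
 */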

/*
 * The IDR does not have to be as high as the radix tree since it uses
 * signed integers, not unsigned longs.
 */
#define IDR_INDEX_BITS		(8 /* CHAR_BIT */ * sizeof(int) - 1)
#define IDR_MAX_PATH		(DIV_ROUND_UP(IDR_INDEX_BITS, \
						RADIX_TREE_MAP_SHIFT))
#define IDR_PRELOAD_SIZE	(IDR_MAX_PATH * 2 - 1)
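
/*
 * Worked example (again assuming RADIX_TREE_MAP_SHIFT == 6, with a 32-bit
 * int): IDR_INDEX_BITS = 31, IDR_MAX_PATH = DIV_ROUND_UP(31, 6) = 6, and
 * IDR_PRELOAD_SIZE = 6 * 2 - 1 = 11 nodes.
 */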

/*
 * Per-cpu pool of preloaded nodes
 */
DEFINE_PER_CPU(struct radix_tree_preload, radix_tree_preloads) = {
	.lock = INIT_LOCAL_LOCK(lock),
};
EXPORT_PER_CPU_SYMBOL_GPL(radix_tree_preloads);

static inline struct radix_tree_node *entry_to_node(void *ptr)
{
	return (void *)((unsigned long)ptr & ~RADIX_TREE_INTERNAL_NODE);
}

static inline void *node_to_entry(void *ptr)
{
	return (void *)((unsigned long)ptr | RADIX_TREE_INTERNAL_NODE);
}
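
/*
 * Illustrative sketch of the tagging scheme above (the concrete bit values
 * are assumed from the slot encoding in radix-tree.h): node allocations are
 * at least 4-byte aligned, so the low bits of a node pointer are free. A
 * node at, say, 0x...f00 is stored in its parent's slot as 0x...f02 by
 * node_to_entry(); entry_to_node() masks RADIX_TREE_INTERNAL_NODE back off.
 * radix_tree_is_internal_node() keys off that bit to tell an internal child
 * node apart from a user pointer.
 */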

#define RADIX_TREE_RETRY	XA_RETRY_ENTRY

static inline unsigned long
get_slot_offset(const struct radix_tree_node *parent, void __rcu **slot)
{
	return parent ? slot - parent->slots : 0;
}

static unsigned int radix_tree_descend(const struct radix_tree_node *parent,
			struct radix_tree_node **nodep, unsigned long index)
{
	unsigned int offset = (index >> parent->shift) & RADIX_TREE_MAP_MASK;
	void __rcu **entry = rcu_dereference_raw(parent->slots[offset]);

	*nodep = (void *)entry;
	return offset;
}

static inline gfp_t root_gfp_mask(const struct radix_tree_root *root)
{
	return root->xa_flags & (__GFP_BITS_MASK & ~GFP_ZONEMASK);
}

static inline void tag_set(struct radix_tree_node *node, unsigned int tag,
		int offset)
{
	__set_bit(offset, node->tags[tag]);
}

static inline void tag_clear(struct radix_tree_node *node, unsigned int tag,
		int offset)
{
	__clear_bit(offset, node->tags[tag]);
}

static inline int tag_get(const struct radix_tree_node *node, unsigned int tag,
		int offset)
{
	return test_bit(offset, node->tags[tag]);
}

static inline void root_tag_set(struct radix_tree_root *root, unsigned tag)
{
	root->xa_flags |= (__force gfp_t)(1 << (tag + ROOT_TAG_SHIFT));
}

static inline void root_tag_clear(struct radix_tree_root *root, unsigned tag)
{
	root->xa_flags &= (__force gfp_t)~(1 << (tag + ROOT_TAG_SHIFT));
}

static inline void root_tag_clear_all(struct radix_tree_root *root)
{
	root->xa_flags &= (__force gfp_t)((1 << ROOT_TAG_SHIFT) - 1);
}

static inline int root_tag_get(const struct radix_tree_root *root, unsigned tag)
{
	return (__force int)root->xa_flags & (1 << (tag + ROOT_TAG_SHIFT));
}

static inline unsigned root_tags_get(const struct radix_tree_root *root)
{
	return (__force unsigned)root->xa_flags >> ROOT_TAG_SHIFT;
}

static inline bool is_idr(const struct radix_tree_root *root)
{
	return !!(root->xa_flags & ROOT_IS_IDR);
}

/*
 * Returns 1 if any slot in the node has this tag set.
 * Otherwise returns 0.
 */
static inline int any_tag_set(const struct radix_tree_node *node,
							unsigned int tag)
{
	unsigned idx;
	for (idx = 0; idx < RADIX_TREE_TAG_LONGS; idx++) {
		if (node->tags[tag][idx])
			return 1;
	}
	return 0;
}

static inline void all_tag_set(struct radix_tree_node *node, unsigned int tag)
{
	bitmap_fill(node->tags[tag], RADIX_TREE_MAP_SIZE);
}

/**
 * radix_tree_find_next_bit - find the next set bit in a memory region
 *
 * @node: the node whose tag bitmap to search
 * @tag: the tag index to search
 * @offset: the bit number to start searching at
 *
 * Unrollable variant of find_next_bit() for constant size arrays.
 * Tail bits starting from size to roundup(size, BITS_PER_LONG) must be zero.
 * Returns next bit offset, or size if nothing found.
 */
static __always_inline unsigned long
radix_tree_find_next_bit(struct radix_tree_node *node, unsigned int tag,
			 unsigned long offset)
{
	const unsigned long *addr = node->tags[tag];

	if (offset < RADIX_TREE_MAP_SIZE) {
		unsigned long tmp;

		addr += offset / BITS_PER_LONG;
		tmp = *addr >> (offset % BITS_PER_LONG);
		if (tmp)
			return __ffs(tmp) + offset;
		offset = (offset + BITS_PER_LONG) & ~(BITS_PER_LONG - 1);
		while (offset < RADIX_TREE_MAP_SIZE) {
			tmp = *++addr;
			if (tmp)
				return __ffs(tmp) + offset;
			offset += BITS_PER_LONG;
		}
	}
	return RADIX_TREE_MAP_SIZE;
}
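
/*
 * Example walk-through of the loop above (a sketch, assuming
 * BITS_PER_LONG == 32 and RADIX_TREE_MAP_SIZE == 64): if only tag bit 40
 * is set and offset is 5, word 0 shifted right by 5 is zero, offset rounds
 * up to 32, word 1 has bit 8 set, and the function returns
 * __ffs(tmp) + offset == 8 + 32 == 40.
 */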

static unsigned int iter_offset(const struct radix_tree_iter *iter)
{
	return iter->index & RADIX_TREE_MAP_MASK;
}

/*
 * The maximum index which can be stored in a radix tree
 */
static inline unsigned long shift_maxindex(unsigned int shift)
{
	return (RADIX_TREE_MAP_SIZE << shift) - 1;
}
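
/*
 * Worked example (assuming RADIX_TREE_MAP_SIZE == 64): a leaf node has
 * shift 0 and shift_maxindex(0) == 63; its parent has shift 6 and covers
 * indices 0-4095 ((64 << 6) - 1); the level above that covers 0-262143,
 * and so on up the tree.
 */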

static inline unsigned long node_maxindex(const struct radix_tree_node *node)
{
	return shift_maxindex(node->shift);
}

static unsigned long next_index(unsigned long index,
				const struct radix_tree_node *node,
				unsigned long offset)
{
	return (index & ~node_maxindex(node)) + (offset << node->shift);
}
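
/*
 * Worked example (assuming RADIX_TREE_MAP_SIZE == 64): for a node with
 * shift == 6, node_maxindex() is 4095. Given index == 5000, the node's
 * base is 5000 & ~4095 == 4096, so slot 2 of that node starts at
 * 4096 + (2 << 6) == 4224.
 */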

/*
 * This assumes that the caller has performed appropriate preallocation, and
 * that the caller has pinned this thread of control to the current CPU.
 */
static struct radix_tree_node *
radix_tree_node_alloc(gfp_t gfp_mask, struct radix_tree_node *parent,
			struct radix_tree_root *root,
			unsigned int shift, unsigned int offset,
			unsigned int count, unsigned int nr_values)
{
	struct radix_tree_node *ret = NULL;

	/*
	 * Preload code isn't irq safe and it doesn't make sense to use
	 * preloading during an interrupt anyway as all the allocations have
	 * to be atomic. So just do normal allocation when in interrupt.
	 */
	if (!gfpflags_allow_blocking(gfp_mask) && !in_interrupt()) {
		struct radix_tree_preload *rtp;

		/*
		 * Even if the caller has preloaded, try to allocate from the
		 * cache first for the new node to get accounted to the memory
		 * cgroup.
		 */
		ret = kmem_cache_alloc(radix_tree_node_cachep,
				       gfp_mask | __GFP_NOWARN);
		if (ret)
			goto out;

		/*
		 * Provided the caller has preloaded here, we will always
		 * succeed in getting a node here (and never reach
		 * kmem_cache_alloc)
		 */
		rtp = this_cpu_ptr(&radix_tree_preloads);
		if (rtp->nr) {
			ret = rtp->nodes;
			rtp->nodes = ret->parent;
			rtp->nr--;
		}
		/*
		 * Update the allocation stack trace as this is more useful
		 * for debugging.
		 */
		kmemleak_update_trace(ret);
		goto out;
	}
	ret = kmem_cache_alloc(radix_tree_node_cachep, gfp_mask);
out:
	BUG_ON(radix_tree_is_internal_node(ret));
	if (ret) {
		ret->shift = shift;
		ret->offset = offset;
		ret->count = count;
		ret->nr_values = nr_values;
		ret->parent = parent;
		ret->array = root;
	}
	return ret;
}

void radix_tree_node_rcu_free(struct rcu_head *head)
{
	struct radix_tree_node *node =
			container_of(head, struct radix_tree_node, rcu_head);

	/*
	 * Must only free zeroed nodes into the slab. We can be left with
	 * non-NULL entries by radix_tree_free_nodes, so clear the entries
	 * and tags here.
	 */
	memset(node->slots, 0, sizeof(node->slots));
	memset(node->tags, 0, sizeof(node->tags));
	INIT_LIST_HEAD(&node->private_list);

	kmem_cache_free(radix_tree_node_cachep, node);
}

static inline void
radix_tree_node_free(struct radix_tree_node *node)
{
	call_rcu(&node->rcu_head, radix_tree_node_rcu_free);
}

/*
 * Load up this CPU's radix_tree_node buffer with sufficient objects to
 * ensure that the addition of a single element in the tree cannot fail. On
 * success, return zero, with preemption disabled. On error, return -ENOMEM
 * with preemption not disabled.
 *
 * To make use of this facility, the radix tree must be initialised without
 * __GFP_DIRECT_RECLAIM being passed to INIT_RADIX_TREE().
 */
static __must_check int __radix_tree_preload(gfp_t gfp_mask, unsigned nr)
{
	struct radix_tree_preload *rtp;
	struct radix_tree_node *node;
	int ret = -ENOMEM;

	/*
	 * Nodes preloaded by one cgroup can be used by another cgroup, so
	 * they should never be accounted to any particular memory cgroup.
	 */
	gfp_mask &= ~__GFP_ACCOUNT;

	local_lock(&radix_tree_preloads.lock);
	rtp = this_cpu_ptr(&radix_tree_preloads);
	while (rtp->nr < nr) {
		local_unlock(&radix_tree_preloads.lock);
		node = kmem_cache_alloc(radix_tree_node_cachep, gfp_mask);
		if (node == NULL)
			goto out;
		local_lock(&radix_tree_preloads.lock);
		rtp = this_cpu_ptr(&radix_tree_preloads);
		if (rtp->nr < nr) {
			node->parent = rtp->nodes;
			rtp->nodes = node;
			rtp->nr++;
		} else {
			kmem_cache_free(radix_tree_node_cachep, node);
		}
	}
	ret = 0;
out:
	return ret;
}

/*
 * Load up this CPU's radix_tree_node buffer with sufficient objects to
 * ensure that the addition of a single element in the tree cannot fail. On
 * success, return zero, with preemption disabled. On error, return -ENOMEM
 * with preemption not disabled.
 *
 * To make use of this facility, the radix tree must be initialised without
 * __GFP_DIRECT_RECLAIM being passed to INIT_RADIX_TREE().
 */
int radix_tree_preload(gfp_t gfp_mask)
{
	/* Warn on non-sensical use... */
	WARN_ON_ONCE(!gfpflags_allow_blocking(gfp_mask));
	return __radix_tree_preload(gfp_mask, RADIX_TREE_PRELOAD_SIZE);
}
EXPORT_SYMBOL(radix_tree_preload);
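
/*
 * Typical preload usage (an illustrative sketch; my_lock, my_tree, index
 * and item are placeholders, not part of this file). Preemption stays
 * disabled after a successful preload, so radix_tree_preload_end() must be
 * called once the insertion is done:
 *
 *	if (radix_tree_preload(GFP_KERNEL))
 *		return -ENOMEM;
 *	spin_lock(&my_lock);
 *	error = radix_tree_insert(&my_tree, index, item);
 *	spin_unlock(&my_lock);
 *	radix_tree_preload_end();
 */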

/*
 * The same as radix_tree_preload(), except we don't guarantee preloading
 * happens: we preload only if we decide it would help. On success, return
 * zero with preemption disabled. On error, return -ENOMEM with preemption
 * not disabled.
 */
int radix_tree_maybe_preload(gfp_t gfp_mask)
{
	if (gfpflags_allow_blocking(gfp_mask))
		return __radix_tree_preload(gfp_mask, RADIX_TREE_PRELOAD_SIZE);
	/* Preloading doesn't help anything with this gfp mask, skip it */
	local_lock(&radix_tree_preloads.lock);
	return 0;
}
EXPORT_SYMBOL(radix_tree_maybe_preload);

static unsigned radix_tree_load_root(const struct radix_tree_root *root,
		struct radix_tree_node **nodep, unsigned long *maxindex)
{
	struct radix_tree_node *node = rcu_dereference_raw(root->xa_head);

	*nodep = node;

	if (likely(radix_tree_is_internal_node(node))) {
		node = entry_to_node(node);
		*maxindex = node_maxindex(node);
		return node->shift + RADIX_TREE_MAP_SHIFT;
	}

	*maxindex = 0;
	return 0;
}

/*
 * Extend a radix tree so it can store key @index.
 */
static int radix_tree_extend(struct radix_tree_root *root, gfp_t gfp,
				unsigned long index, unsigned int shift)
{
	void *entry;
	unsigned int maxshift;
	int tag;

	/* Figure out what the shift should be. */
	maxshift = shift;
	while (index > shift_maxindex(maxshift))
		maxshift += RADIX_TREE_MAP_SHIFT;

	entry = rcu_dereference_raw(root->xa_head);
	if (!entry && (!is_idr(root) || root_tag_get(root, IDR_FREE)))
		goto out;

	do {
		struct radix_tree_node *node = radix_tree_node_alloc(gfp, NULL,
							root, shift, 0, 1, 0);
		if (!node)
			return -ENOMEM;

		if (is_idr(root)) {
			all_tag_set(node, IDR_FREE);
			if (!root_tag_get(root, IDR_FREE)) {
				tag_clear(node, IDR_FREE, 0);
				root_tag_set(root, IDR_FREE);
			}
		} else {
			/* Propagate the aggregated tag info to the new child */
			for (tag = 0; tag < RADIX_TREE_MAX_TAGS; tag++) {
				if (root_tag_get(root, tag))
					tag_set(node, tag, 0);
			}
		}

		BUG_ON(shift > BITS_PER_LONG);
		if (radix_tree_is_internal_node(entry)) {
			entry_to_node(entry)->parent = node;
		} else if (xa_is_value(entry)) {
			/* Moving a value entry root->xa_head to a node */
			node->nr_values = 1;
		}
		/*
		 * entry was already in the radix tree, so we do not need
		 * rcu_assign_pointer here
		 */
		node->slots[0] = (void __rcu *)entry;
		entry = node_to_entry(node);
		rcu_assign_pointer(root->xa_head, entry);
		shift += RADIX_TREE_MAP_SHIFT;
	} while (shift <= maxshift);
out:
	return maxshift + RADIX_TREE_MAP_SHIFT;
}
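
/*
 * Illustrative example of extension (assuming RADIX_TREE_MAP_SHIFT == 6):
 * a tree holding one entry at index 0 has no nodes and maxindex 0.
 * Inserting at index 100 reaches radix_tree_extend() with shift == 0; it
 * allocates a shift-0 node, moves the old root entry into slot 0 and makes
 * the node the new root, then (since 100 > 63) pushes a shift-6 node on
 * top the same way. The returned value, 12, tells the caller the shift at
 * which to start descending.
 */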

/**
 * radix_tree_shrink - shrink radix tree to minimum height
 * @root: radix tree root
 */
static inline bool radix_tree_shrink(struct radix_tree_root *root)
{
	bool shrunk = false;

	for (;;) {
		struct radix_tree_node *node = rcu_dereference_raw(root->xa_head);
		struct radix_tree_node *child;

		if (!radix_tree_is_internal_node(node))
			break;
		node = entry_to_node(node);

		/*
		 * If the candidate node has more than one child, or its
		 * child is not at the leftmost slot, we cannot shrink.
		 */
		if (node->count != 1)
			break;
		child = rcu_dereference_raw(node->slots[0]);
		if (!child)
			break;

		/*
		 * For an IDR, we must not shrink entry 0 into the root in
		 * case somebody calls idr_replace() with a pointer that
		 * appears to be an internal entry
		 */
		if (!node->shift && is_idr(root))
			break;

		if (radix_tree_is_internal_node(child))
			entry_to_node(child)->parent = NULL;

		/*
		 * We don't need rcu_assign_pointer(), since we are simply
		 * moving the node from one part of the tree to another: if it
		 * was safe to dereference the old pointer to it
		 * (node->slots[0]), it will be safe to dereference the new
		 * one (root->xa_head) as far as dependent read barriers go.
		 */
		root->xa_head = (void __rcu *)child;
		if (is_idr(root) && !tag_get(node, IDR_FREE, 0))
			root_tag_clear(root, IDR_FREE);

		/*
		 * We have a dilemma here. The node's slot[0] must not be
		 * NULLed in case there are concurrent lookups expecting to
		 * find the item. However if this was a bottom-level node,
		 * then it may be subject to the slot pointer being visible
		 * to callers dereferencing it. If the item corresponding to
		 * slot[0] is subsequently deleted, these callers would expect
		 * their slot to become empty sooner or later.
		 *
		 * For example, lockless pagecache will look up a slot, deref
		 * the page pointer, and if the page has 0 refcount it means it
		 * was concurrently deleted from pagecache so try the deref
		 * again. Fortunately there is already a requirement for logic
		 * to retry the entire slot lookup -- the indirect pointer
		 * problem (replacing direct root node with an indirect pointer
		 * also results in a stale slot). So tag the slot as indirect
		 * to force callers to retry.
		 */
		node->count = 0;
		if (!radix_tree_is_internal_node(child)) {
			node->slots[0] = (void __rcu *)RADIX_TREE_RETRY;
		}

		WARN_ON_ONCE(!list_empty(&node->private_list));
		radix_tree_node_free(node);
		shrunk = true;
	}

	return shrunk;
}

static bool delete_node(struct radix_tree_root *root,
			struct radix_tree_node *node)
{
	bool deleted = false;

	do {
		struct radix_tree_node *parent;

		if (node->count) {
			if (node_to_entry(node) ==
					rcu_dereference_raw(root->xa_head))
				deleted |= radix_tree_shrink(root);
			return deleted;
		}

		parent = node->parent;
		if (parent) {
			parent->slots[node->offset] = NULL;
			parent->count--;
		} else {
			/*
			 * Shouldn't the tags already have all been cleared
			 * by the caller?
			 */
			if (!is_idr(root))
				root_tag_clear_all(root);
			root->xa_head = NULL;
		}

		WARN_ON_ONCE(!list_empty(&node->private_list));
		radix_tree_node_free(node);
		deleted = true;

		node = parent;
	} while (node);

	return deleted;
}

/**
 * __radix_tree_create - create a slot in a radix tree
 * @root: radix tree root
 * @index: index key
 * @nodep: returns node
 * @slotp: returns slot
 *
 * Create, if necessary, and return the node and slot for an item
 * at position @index in the radix tree @root.
 *
 * Until there is more than one item in the tree, no nodes are
 * allocated and @root->xa_head is used as a direct slot instead of
 * pointing to a node, in which case *@nodep will be NULL.
 *
 * Returns -ENOMEM, or 0 for success.
 */
static int __radix_tree_create(struct radix_tree_root *root,
		unsigned long index, struct radix_tree_node **nodep,
		void __rcu ***slotp)
{
	struct radix_tree_node *node = NULL, *child;
	void __rcu **slot = (void __rcu **)&root->xa_head;
	unsigned long maxindex;
	unsigned int shift, offset = 0;
	unsigned long max = index;
	gfp_t gfp = root_gfp_mask(root);

	shift = radix_tree_load_root(root, &child, &maxindex);

	/* Make sure the tree is high enough. */
	if (max > maxindex) {
		int error = radix_tree_extend(root, gfp, max, shift);
		if (error < 0)
			return error;
		shift = error;
		child = rcu_dereference_raw(root->xa_head);
	}

	while (shift > 0) {
		shift -= RADIX_TREE_MAP_SHIFT;
		if (child == NULL) {
			/* Have to add a child node. */
			child = radix_tree_node_alloc(gfp, node, root, shift,
							offset, 0, 0);
			if (!child)
				return -ENOMEM;
			rcu_assign_pointer(*slot, node_to_entry(child));
			if (node)
				node->count++;
		} else if (!radix_tree_is_internal_node(child))
			break;

		/* Go a level down */
		node = entry_to_node(child);
		offset = radix_tree_descend(node, &child, index);
		slot = &node->slots[offset];
	}

	if (nodep)
		*nodep = node;
	if (slotp)
		*slotp = slot;
	return 0;
}

/*
 * Free any nodes below this node. The tree is presumed to not need
 * shrinking, and any user data in the tree is presumed to not need a
 * destructor called on it. If we need to add a destructor, we can
 * add that functionality later. Note that we may not clear tags or
 * slots from the tree as an RCU walker may still have a pointer into
 * this subtree. We could replace the entries with RADIX_TREE_RETRY,
 * but we'll still have to clear those in rcu_free.
 */
static void radix_tree_free_nodes(struct radix_tree_node *node)
{
	unsigned offset = 0;
	struct radix_tree_node *child = entry_to_node(node);

	for (;;) {
		void *entry = rcu_dereference_raw(child->slots[offset]);
		if (xa_is_node(entry) && child->shift) {
			child = entry_to_node(entry);
			offset = 0;
			continue;
		}
		offset++;
		while (offset == RADIX_TREE_MAP_SIZE) {
			struct radix_tree_node *old = child;
			offset = child->offset + 1;
			child = child->parent;
			WARN_ON_ONCE(!list_empty(&old->private_list));
			radix_tree_node_free(old);
			if (old == entry_to_node(node))
				return;
		}
	}
}

static inline int insert_entries(struct radix_tree_node *node,
		void __rcu **slot, void *item, bool replace)
{
	if (*slot)
		return -EEXIST;
	rcu_assign_pointer(*slot, item);
	if (node) {
		node->count++;
		if (xa_is_value(item))
			node->nr_values++;
	}
	return 1;
}

/**
 * radix_tree_insert - insert into a radix tree
 * @root: radix tree root
 * @index: index key
 * @item: item to insert
 *
 * Insert an item into the radix tree at position @index.
 */
int radix_tree_insert(struct radix_tree_root *root, unsigned long index,
			void *item)
{
	struct radix_tree_node *node;
	void __rcu **slot;
	int error;

	BUG_ON(radix_tree_is_internal_node(item));

	error = __radix_tree_create(root, index, &node, &slot);
	if (error)
		return error;

	error = insert_entries(node, slot, item, false);
	if (error < 0)
		return error;

	if (node) {
		unsigned offset = get_slot_offset(node, slot);
		BUG_ON(tag_get(node, 0, offset));
		BUG_ON(tag_get(node, 1, offset));
		BUG_ON(tag_get(node, 2, offset));
	} else {
		BUG_ON(root_tags_get(root));
	}

	return 0;
}
EXPORT_SYMBOL(radix_tree_insert);
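
/*
 * Example of the insertion contract (illustrative; my_tree and the items
 * are placeholders): inserting twice at the same index fails with -EEXIST
 * rather than replacing the existing entry:
 *
 *	error = radix_tree_insert(&my_tree, 10, item_a);   (returns 0)
 *	error = radix_tree_insert(&my_tree, 10, item_b);   (returns -EEXIST)
 */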
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 730)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 731) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 732) * __radix_tree_lookup - lookup an item in a radix tree
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 733) * @root: radix tree root
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 734) * @index: index key
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 735) * @nodep: returns node
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 736) * @slotp: returns slot
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 737) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 738) * Lookup and return the item at position @index in the radix
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 739) * tree @root.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 740) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 741) * Until there is more than one item in the tree, no nodes are
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 742) * allocated and @root->xa_head is used as a direct slot instead of
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 743) * pointing to a node, in which case *@nodep will be NULL.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 744) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 745) void *__radix_tree_lookup(const struct radix_tree_root *root,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 746) unsigned long index, struct radix_tree_node **nodep,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 747) void __rcu ***slotp)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 748) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 749) struct radix_tree_node *node, *parent;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 750) unsigned long maxindex;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 751) void __rcu **slot;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 752)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 753) restart:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 754) parent = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 755) slot = (void __rcu **)&root->xa_head;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 756) radix_tree_load_root(root, &node, &maxindex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 757) if (index > maxindex)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 758) return NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 759)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 760) while (radix_tree_is_internal_node(node)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 761) unsigned offset;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 762)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 763) parent = entry_to_node(node);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 764) offset = radix_tree_descend(parent, &node, index);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 765) slot = parent->slots + offset;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 766) if (node == RADIX_TREE_RETRY)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 767) goto restart;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 768) if (parent->shift == 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 769) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 770) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 771)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 772) if (nodep)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 773) *nodep = parent;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 774) if (slotp)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 775) *slotp = slot;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 776) return node;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 777) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 778)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 779) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 780) * radix_tree_lookup_slot - lookup a slot in a radix tree
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 781) * @root: radix tree root
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 782) * @index: index key
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 783) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 784) * Returns: the slot corresponding to the position @index in the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 785) * radix tree @root. This is useful for update-if-exists operations.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 786) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 787) * This function can be called under rcu_read_lock iff the slot is not
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 788) * modified by radix_tree_replace_slot, otherwise it must be called
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 789) * exclusive from other writers. Any dereference of the slot must be done
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 790) * using radix_tree_deref_slot.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 791) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 792) void __rcu **radix_tree_lookup_slot(const struct radix_tree_root *root,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 793) unsigned long index)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 794) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 795) void __rcu **slot;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 796)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 797) if (!__radix_tree_lookup(root, index, NULL, &slot))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 798) return NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 799) return slot;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 800) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 801) EXPORT_SYMBOL(radix_tree_lookup_slot);
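
/*
 * Example (illustrative sketch, not compiled): update-if-exists under the
 * caller's tree lock, replacing one regular entry with another.  'my_tree',
 * 'my_lock', 'index' and 'new_item' are hypothetical.
 *
 *	void __rcu **slot;
 *
 *	spin_lock(&my_lock);
 *	slot = radix_tree_lookup_slot(&my_tree, index);
 *	if (slot)
 *		radix_tree_replace_slot(&my_tree, slot, new_item);
 *	spin_unlock(&my_lock);
 */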
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 802)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 803) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 804) * radix_tree_lookup - perform lookup operation on a radix tree
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 805) * @root: radix tree root
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 806) * @index: index key
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 807) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 808) * Lookup the item at the position @index in the radix tree @root.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 809) *
 * This function can be called under rcu_read_lock; however, the caller
 * must manage the lifetimes of leaf nodes (e.g. RCU may also be used to
 * free them safely).  No RCU barriers are required to access or modify
 * the returned item.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 814) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 815) void *radix_tree_lookup(const struct radix_tree_root *root, unsigned long index)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 816) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 817) return __radix_tree_lookup(root, index, NULL, NULL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 818) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 819) EXPORT_SYMBOL(radix_tree_lookup);
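
/*
 * Example (illustrative sketch): an RCU-protected lookup.  'my_tree' is
 * hypothetical; the stored items are assumed to be freed via RCU, so the
 * returned pointer stays valid for the read-side critical section.
 *
 *	rcu_read_lock();
 *	item = radix_tree_lookup(&my_tree, index);
 *	if (item)
 *		do_something(item);
 *	rcu_read_unlock();
 */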
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 820)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 821) static void replace_slot(void __rcu **slot, void *item,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 822) struct radix_tree_node *node, int count, int values)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 823) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 824) if (node && (count || values)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 825) node->count += count;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 826) node->nr_values += values;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 827) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 828)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 829) rcu_assign_pointer(*slot, item);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 830) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 831)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 832) static bool node_tag_get(const struct radix_tree_root *root,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 833) const struct radix_tree_node *node,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 834) unsigned int tag, unsigned int offset)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 835) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 836) if (node)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 837) return tag_get(node, tag, offset);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 838) return root_tag_get(root, tag);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 839) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 840)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 841) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 842) * IDR users want to be able to store NULL in the tree, so if the slot isn't
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 843) * free, don't adjust the count, even if it's transitioning between NULL and
 * non-NULL.  For the IDA, we mark slots as IDR_FREE while they still have
 * empty bits, but the IDA only stores NULL in slots that are being
 * deleted.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 847) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 848) static int calculate_count(struct radix_tree_root *root,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 849) struct radix_tree_node *node, void __rcu **slot,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 850) void *item, void *old)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 851) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 852) if (is_idr(root)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 853) unsigned offset = get_slot_offset(node, slot);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 854) bool free = node_tag_get(root, node, IDR_FREE, offset);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 855) if (!free)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 856) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 857) if (!old)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 858) return 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 859) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 860) return !!item - !!old;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 861) }
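
/*
 * For a non-IDR tree the delta reduces to !!item - !!old: storing over an
 * empty slot is +1, deleting a present entry is -1, and replacing one
 * present entry with another is 0.  For an IDR, a slot which is not tagged
 * IDR_FREE is already accounted, so the delta is 0 even when the stored
 * value transitions between NULL and non-NULL.
 */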
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 862)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 863) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 864) * __radix_tree_replace - replace item in a slot
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 865) * @root: radix tree root
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 866) * @node: pointer to tree node
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 867) * @slot: pointer to slot in @node
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 868) * @item: new item to store in the slot.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 869) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 870) * For use with __radix_tree_lookup(). Caller must hold tree write locked
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 871) * across slot lookup and replacement.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 872) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 873) void __radix_tree_replace(struct radix_tree_root *root,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 874) struct radix_tree_node *node,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 875) void __rcu **slot, void *item)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 876) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 877) void *old = rcu_dereference_raw(*slot);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 878) int values = !!xa_is_value(item) - !!xa_is_value(old);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 879) int count = calculate_count(root, node, slot, item, old);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 880)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 881) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 882) * This function supports replacing value entries and
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 883) * deleting entries, but that needs accounting against the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 884) * node unless the slot is root->xa_head.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 885) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 886) WARN_ON_ONCE(!node && (slot != (void __rcu **)&root->xa_head) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 887) (count || values));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 888) replace_slot(slot, item, node, count, values);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 889)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 890) if (!node)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 891) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 892)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 893) delete_node(root, node);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 894) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 895)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 896) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 897) * radix_tree_replace_slot - replace item in a slot
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 898) * @root: radix tree root
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 899) * @slot: pointer to slot
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 900) * @item: new item to store in the slot.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 901) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 902) * For use with radix_tree_lookup_slot() and
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 903) * radix_tree_gang_lookup_tag_slot(). Caller must hold tree write locked
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 904) * across slot lookup and replacement.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 905) *
 * NOTE: This cannot be used to switch between non-entries (empty slots),
 * regular entries, and value entries, as that requires accounting
 * inside the radix tree node.  When switching between entry types, or
 * when deleting an entry, use __radix_tree_lookup() and
 * __radix_tree_replace() or radix_tree_iter_replace().
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 911) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 912) void radix_tree_replace_slot(struct radix_tree_root *root,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 913) void __rcu **slot, void *item)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 914) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 915) __radix_tree_replace(root, NULL, slot, item);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 916) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 917) EXPORT_SYMBOL(radix_tree_replace_slot);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 918)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 919) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 920) * radix_tree_iter_replace - replace item in a slot
 * @root: radix tree root
 * @iter: iterator state
 * @slot: pointer to slot
 * @item: new item to store in the slot.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 924) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 925) * For use with radix_tree_for_each_slot().
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 926) * Caller must hold tree write locked.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 927) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 928) void radix_tree_iter_replace(struct radix_tree_root *root,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 929) const struct radix_tree_iter *iter,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 930) void __rcu **slot, void *item)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 931) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 932) __radix_tree_replace(root, iter->node, slot, item);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 933) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 934)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 935) static void node_tag_set(struct radix_tree_root *root,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 936) struct radix_tree_node *node,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 937) unsigned int tag, unsigned int offset)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 938) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 939) while (node) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 940) if (tag_get(node, tag, offset))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 941) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 942) tag_set(node, tag, offset);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 943) offset = node->offset;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 944) node = node->parent;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 945) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 946)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 947) if (!root_tag_get(root, tag))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 948) root_tag_set(root, tag);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 949) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 950)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 951) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 952) * radix_tree_tag_set - set a tag on a radix tree node
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 953) * @root: radix tree root
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 954) * @index: index key
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 955) * @tag: tag index
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 956) *
 * Set the search tag (which must be < RADIX_TREE_MAX_TAGS)
 * corresponding to @index in the radix tree, from the root
 * all the way down to the leaf node.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 960) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 961) * Returns the address of the tagged item. Setting a tag on a not-present
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 962) * item is a bug.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 963) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 964) void *radix_tree_tag_set(struct radix_tree_root *root,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 965) unsigned long index, unsigned int tag)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 966) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 967) struct radix_tree_node *node, *parent;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 968) unsigned long maxindex;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 969)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 970) radix_tree_load_root(root, &node, &maxindex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 971) BUG_ON(index > maxindex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 972)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 973) while (radix_tree_is_internal_node(node)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 974) unsigned offset;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 975)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 976) parent = entry_to_node(node);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 977) offset = radix_tree_descend(parent, &node, index);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 978) BUG_ON(!node);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 979)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 980) if (!tag_get(parent, tag, offset))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 981) tag_set(parent, tag, offset);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 982) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 983)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 984) /* set the root's tag bit */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 985) if (!root_tag_get(root, tag))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 986) root_tag_set(root, tag);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 987)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 988) return node;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 989) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 990) EXPORT_SYMBOL(radix_tree_tag_set);
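
/*
 * Example (illustrative sketch): mark a present entry with a caller-defined
 * tag.  'my_tree', 'my_lock' and MY_TAG_DIRTY (< RADIX_TREE_MAX_TAGS) are
 * hypothetical; the caller must hold the tree write-locked.
 *
 *	spin_lock(&my_lock);
 *	radix_tree_tag_set(&my_tree, index, MY_TAG_DIRTY);
 *	spin_unlock(&my_lock);
 */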
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 991)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 992) static void node_tag_clear(struct radix_tree_root *root,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 993) struct radix_tree_node *node,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 994) unsigned int tag, unsigned int offset)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 995) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 996) while (node) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 997) if (!tag_get(node, tag, offset))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 998) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 999) tag_clear(node, tag, offset);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1000) if (any_tag_set(node, tag))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1001) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1002)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1003) offset = node->offset;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1004) node = node->parent;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1005) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1006)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1007) /* clear the root's tag bit */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1008) if (root_tag_get(root, tag))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1009) root_tag_clear(root, tag);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1010) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1011)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1012) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1013) * radix_tree_tag_clear - clear a tag on a radix tree node
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1014) * @root: radix tree root
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1015) * @index: index key
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1016) * @tag: tag index
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1017) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1018) * Clear the search tag (which must be < RADIX_TREE_MAX_TAGS)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1019) * corresponding to @index in the radix tree. If this causes
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1020) * the leaf node to have no tags set then clear the tag in the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1021) * next-to-leaf node, etc.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1022) *
 * Returns the address of the tagged item on success, else NULL; i.e. it
 * has the same return value and semantics as radix_tree_lookup().
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1025) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1026) void *radix_tree_tag_clear(struct radix_tree_root *root,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1027) unsigned long index, unsigned int tag)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1028) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1029) struct radix_tree_node *node, *parent;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1030) unsigned long maxindex;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1031) int offset;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1032)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1033) radix_tree_load_root(root, &node, &maxindex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1034) if (index > maxindex)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1035) return NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1036)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1037) parent = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1038)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1039) while (radix_tree_is_internal_node(node)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1040) parent = entry_to_node(node);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1041) offset = radix_tree_descend(parent, &node, index);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1042) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1043)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1044) if (node)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1045) node_tag_clear(root, parent, tag, offset);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1046)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1047) return node;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1048) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1049) EXPORT_SYMBOL(radix_tree_tag_clear);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1050)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1051) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1052) * radix_tree_iter_tag_clear - clear a tag on the current iterator entry
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1053) * @root: radix tree root
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1054) * @iter: iterator state
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1055) * @tag: tag to clear
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1056) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1057) void radix_tree_iter_tag_clear(struct radix_tree_root *root,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1058) const struct radix_tree_iter *iter, unsigned int tag)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1059) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1060) node_tag_clear(root, iter->node, tag, iter_offset(iter));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1061) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1062)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1063) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1064) * radix_tree_tag_get - get a tag on a radix tree node
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1065) * @root: radix tree root
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1066) * @index: index key
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1067) * @tag: tag index (< RADIX_TREE_MAX_TAGS)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1068) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1069) * Return values:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1070) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1071) * 0: tag not present or not set
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1072) * 1: tag set
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1073) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1074) * Note that the return value of this function may not be relied on, even if
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1075) * the RCU lock is held, unless tag modification and node deletion are excluded
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1076) * from concurrency.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1077) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1078) int radix_tree_tag_get(const struct radix_tree_root *root,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1079) unsigned long index, unsigned int tag)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1080) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1081) struct radix_tree_node *node, *parent;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1082) unsigned long maxindex;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1083)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1084) if (!root_tag_get(root, tag))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1085) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1086)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1087) radix_tree_load_root(root, &node, &maxindex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1088) if (index > maxindex)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1089) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1090)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1091) while (radix_tree_is_internal_node(node)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1092) unsigned offset;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1093)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1094) parent = entry_to_node(node);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1095) offset = radix_tree_descend(parent, &node, index);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1096)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1097) if (!tag_get(parent, tag, offset))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1098) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1099) if (node == RADIX_TREE_RETRY)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1100) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1101) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1102)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1103) return 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1104) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1105) EXPORT_SYMBOL(radix_tree_tag_get);
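
/*
 * Example (illustrative sketch): test-and-clear under the tree lock so the
 * answer cannot be invalidated by a concurrent modification.  'my_tree',
 * 'my_lock' and MY_TAG_DIRTY are hypothetical.
 *
 *	spin_lock(&my_lock);
 *	if (radix_tree_tag_get(&my_tree, index, MY_TAG_DIRTY))
 *		radix_tree_tag_clear(&my_tree, index, MY_TAG_DIRTY);
 *	spin_unlock(&my_lock);
 */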
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1106)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1107) /* Construct iter->tags bit-mask from node->tags[tag] array */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1108) static void set_iter_tags(struct radix_tree_iter *iter,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1109) struct radix_tree_node *node, unsigned offset,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1110) unsigned tag)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1111) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1112) unsigned tag_long = offset / BITS_PER_LONG;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1113) unsigned tag_bit = offset % BITS_PER_LONG;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1114)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1115) if (!node) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1116) iter->tags = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1117) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1118) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1119)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1120) iter->tags = node->tags[tag][tag_long] >> tag_bit;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1121)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1122) /* This never happens if RADIX_TREE_TAG_LONGS == 1 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1123) if (tag_long < RADIX_TREE_TAG_LONGS - 1) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1124) /* Pick tags from next element */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1125) if (tag_bit)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1126) iter->tags |= node->tags[tag][tag_long + 1] <<
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1127) (BITS_PER_LONG - tag_bit);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1128) /* Clip chunk size, here only BITS_PER_LONG tags */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1129) iter->next_index = __radix_tree_iter_add(iter, BITS_PER_LONG);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1130) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1131) }
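
/*
 * Worked example of the splice above: with BITS_PER_LONG == 32 and
 * RADIX_TREE_MAP_SIZE == 64 (so RADIX_TREE_TAG_LONGS == 2), offset 20 gives
 * tag_long == 0 and tag_bit == 20.  iter->tags takes the top 12 bits of
 * tags[tag][0], the low 20 bits of tags[tag][1] are shifted up by 12 to
 * fill the rest, and next_index is clipped so that the chunk covers exactly
 * BITS_PER_LONG slots.
 */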
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1132)
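/**
 * radix_tree_iter_resume - resume iterating when the chunk may be invalid
 * @slot: pointer to current slot
 * @iter: iterator state
 *
 * If the iterator needs to release then reacquire a lock, the chunk may
 * have been invalidated by an insertion or deletion.  Call this function
 * before releasing the lock to continue the iteration from the next index.
 *
 * Returns: NULL; the caller continues via the iteration macros, which
 * fetch a fresh chunk at iter->next_index.
 */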
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1133) void __rcu **radix_tree_iter_resume(void __rcu **slot,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1134) struct radix_tree_iter *iter)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1135) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1136) slot++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1137) iter->index = __radix_tree_iter_add(iter, 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1138) iter->next_index = iter->index;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1139) iter->tags = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1140) return NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1141) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1142) EXPORT_SYMBOL(radix_tree_iter_resume);
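
/*
 * Example (illustrative sketch): dropping a hypothetical 'my_lock'
 * periodically during a long iteration over 'my_tree' without losing the
 * current position.
 *
 *	radix_tree_for_each_slot(slot, &my_tree, &iter, 0) {
 *		process(rcu_dereference_protected(*slot,
 *				lockdep_is_held(&my_lock)));
 *		if (need_resched()) {
 *			slot = radix_tree_iter_resume(slot, &iter);
 *			spin_unlock(&my_lock);
 *			cond_resched();
 *			spin_lock(&my_lock);
 *		}
 *	}
 */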
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1143)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1144) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1145) * radix_tree_next_chunk - find next chunk of slots for iteration
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1146) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1147) * @root: radix tree root
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1148) * @iter: iterator state
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1149) * @flags: RADIX_TREE_ITER_* flags and tag index
 * Returns: pointer to the first slot of the chunk, or NULL if iteration is over
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1151) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1152) void __rcu **radix_tree_next_chunk(const struct radix_tree_root *root,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1153) struct radix_tree_iter *iter, unsigned flags)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1154) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1155) unsigned tag = flags & RADIX_TREE_ITER_TAG_MASK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1156) struct radix_tree_node *node, *child;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1157) unsigned long index, offset, maxindex;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1158)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1159) if ((flags & RADIX_TREE_ITER_TAGGED) && !root_tag_get(root, tag))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1160) return NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1161)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1162) /*
	 * Catch next_index overflow after ~0UL.  iter->index never overflows
	 * during iteration; it can be zero only at the beginning.
	 * And we cannot overflow iter->next_index in a single step,
	 * because RADIX_TREE_MAP_SHIFT < BITS_PER_LONG.
	 *
	 * This condition is also used by radix_tree_next_slot() to stop
	 * contiguous iterating, and to forbid switching to the next chunk.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1170) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1171) index = iter->next_index;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1172) if (!index && iter->index)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1173) return NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1174)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1175) restart:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1176) radix_tree_load_root(root, &child, &maxindex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1177) if (index > maxindex)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1178) return NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1179) if (!child)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1180) return NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1181)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1182) if (!radix_tree_is_internal_node(child)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1183) /* Single-slot tree */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1184) iter->index = index;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1185) iter->next_index = maxindex + 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1186) iter->tags = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1187) iter->node = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1188) return (void __rcu **)&root->xa_head;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1189) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1190)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1191) do {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1192) node = entry_to_node(child);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1193) offset = radix_tree_descend(node, &child, index);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1194)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1195) if ((flags & RADIX_TREE_ITER_TAGGED) ?
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1196) !tag_get(node, tag, offset) : !child) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1197) /* Hole detected */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1198) if (flags & RADIX_TREE_ITER_CONTIG)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1199) return NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1200)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1201) if (flags & RADIX_TREE_ITER_TAGGED)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1202) offset = radix_tree_find_next_bit(node, tag,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1203) offset + 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1204) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1205) while (++offset < RADIX_TREE_MAP_SIZE) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1206) void *slot = rcu_dereference_raw(
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1207) node->slots[offset]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1208) if (slot)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1209) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1210) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1211) index &= ~node_maxindex(node);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1212) index += offset << node->shift;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1213) /* Overflow after ~0UL */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1214) if (!index)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1215) return NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1216) if (offset == RADIX_TREE_MAP_SIZE)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1217) goto restart;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1218) child = rcu_dereference_raw(node->slots[offset]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1219) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1220)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1221) if (!child)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1222) goto restart;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1223) if (child == RADIX_TREE_RETRY)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1224) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1225) } while (node->shift && radix_tree_is_internal_node(child));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1226)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1227) /* Update the iterator state */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1228) iter->index = (index &~ node_maxindex(node)) | offset;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1229) iter->next_index = (index | node_maxindex(node)) + 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1230) iter->node = node;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1231)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1232) if (flags & RADIX_TREE_ITER_TAGGED)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1233) set_iter_tags(iter, node, offset, tag);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1234)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1235) return node->slots + offset;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1236) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1237) EXPORT_SYMBOL(radix_tree_next_chunk);
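
/*
 * radix_tree_next_chunk() is normally used via the iteration macros rather
 * than called directly.  As a sketch, radix_tree_for_each_slot() is roughly
 * equivalent to:
 *
 *	slot = radix_tree_iter_init(&iter, start);
 *	while (slot || (slot = radix_tree_next_chunk(root, &iter, 0))) {
 *		...
 *		slot = radix_tree_next_slot(slot, &iter, 0);
 *	}
 */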
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1238)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1239) /**
 * radix_tree_gang_lookup - perform multiple lookups on a radix tree
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1241) * @root: radix tree root
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1242) * @results: where the results of the lookup are placed
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1243) * @first_index: start the lookup from this key
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1244) * @max_items: place up to this many items at *results
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1245) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1246) * Performs an index-ascending scan of the tree for present items. Places
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1247) * them at *@results and returns the number of items which were placed at
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1248) * *@results.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1249) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1250) * The implementation is naive.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1251) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1252) * Like radix_tree_lookup, radix_tree_gang_lookup may be called under
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1253) * rcu_read_lock. In this case, rather than the returned results being
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1254) * an atomic snapshot of the tree at a single point in time, the
 * semantics of an RCU protected gang lookup are as though multiple
 * radix_tree_lookups have been issued under individual lock acquisitions,
 * with the results stored in 'results'.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1258) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1259) unsigned int
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1260) radix_tree_gang_lookup(const struct radix_tree_root *root, void **results,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1261) unsigned long first_index, unsigned int max_items)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1262) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1263) struct radix_tree_iter iter;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1264) void __rcu **slot;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1265) unsigned int ret = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1266)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1267) if (unlikely(!max_items))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1268) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1269)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1270) radix_tree_for_each_slot(slot, root, &iter, first_index) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1271) results[ret] = rcu_dereference_raw(*slot);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1272) if (!results[ret])
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1273) continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1274) if (radix_tree_is_internal_node(results[ret])) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1275) slot = radix_tree_iter_retry(&iter);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1276) continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1277) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1278) if (++ret == max_items)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1279) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1280) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1281)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1282) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1283) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1284) EXPORT_SYMBOL(radix_tree_gang_lookup);
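
/*
 * Example (illustrative sketch): scanning a tree in batches of 16.
 * 'struct my_item' and its 'index' member are hypothetical; because gang
 * lookup returns only pointers, the items themselves must record their
 * index for the scan to advance.
 *
 *	struct my_item *batch[16];
 *	unsigned long index = 0;
 *	unsigned int i, n;
 *
 *	while ((n = radix_tree_gang_lookup(&my_tree, (void **)batch,
 *					   index, ARRAY_SIZE(batch)))) {
 *		for (i = 0; i < n; i++)
 *			process(batch[i]);
 *		index = batch[n - 1]->index + 1;
 *	}
 */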
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1285)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1286) /**
 * radix_tree_gang_lookup_tag - perform multiple lookups on a radix tree
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1288) * based on a tag
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1289) * @root: radix tree root
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1290) * @results: where the results of the lookup are placed
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1291) * @first_index: start the lookup from this key
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1292) * @max_items: place up to this many items at *results
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1293) * @tag: the tag index (< RADIX_TREE_MAX_TAGS)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1294) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1295) * Performs an index-ascending scan of the tree for present items which
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1296) * have the tag indexed by @tag set. Places the items at *@results and
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1297) * returns the number of items which were placed at *@results.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1298) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1299) unsigned int
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1300) radix_tree_gang_lookup_tag(const struct radix_tree_root *root, void **results,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1301) unsigned long first_index, unsigned int max_items,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1302) unsigned int tag)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1303) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1304) struct radix_tree_iter iter;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1305) void __rcu **slot;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1306) unsigned int ret = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1307)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1308) if (unlikely(!max_items))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1309) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1310)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1311) radix_tree_for_each_tagged(slot, root, &iter, first_index, tag) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1312) results[ret] = rcu_dereference_raw(*slot);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1313) if (!results[ret])
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1314) continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1315) if (radix_tree_is_internal_node(results[ret])) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1316) slot = radix_tree_iter_retry(&iter);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1317) continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1318) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1319) if (++ret == max_items)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1320) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1321) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1322)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1323) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1324) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1325) EXPORT_SYMBOL(radix_tree_gang_lookup_tag);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1326)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1327) /**
 * radix_tree_gang_lookup_tag_slot - perform multiple slot lookups on a
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1329) * radix tree based on a tag
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1330) * @root: radix tree root
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1331) * @results: where the results of the lookup are placed
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1332) * @first_index: start the lookup from this key
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1333) * @max_items: place up to this many items at *results
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1334) * @tag: the tag index (< RADIX_TREE_MAX_TAGS)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1335) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1336) * Performs an index-ascending scan of the tree for present items which
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1337) * have the tag indexed by @tag set. Places the slots at *@results and
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1338) * returns the number of slots which were placed at *@results.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1339) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1340) unsigned int
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1341) radix_tree_gang_lookup_tag_slot(const struct radix_tree_root *root,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1342) void __rcu ***results, unsigned long first_index,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1343) unsigned int max_items, unsigned int tag)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1344) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1345) struct radix_tree_iter iter;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1346) void __rcu **slot;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1347) unsigned int ret = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1348)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1349) if (unlikely(!max_items))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1350) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1351)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1352) radix_tree_for_each_tagged(slot, root, &iter, first_index, tag) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1353) results[ret] = slot;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1354) if (++ret == max_items)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1355) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1356) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1357)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1358) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1359) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1360) EXPORT_SYMBOL(radix_tree_gang_lookup_tag_slot);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1361)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1362) static bool __radix_tree_delete(struct radix_tree_root *root,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1363) struct radix_tree_node *node, void __rcu **slot)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1364) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1365) void *old = rcu_dereference_raw(*slot);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1366) int values = xa_is_value(old) ? -1 : 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1367) unsigned offset = get_slot_offset(node, slot);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1368) int tag;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1369)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1370) if (is_idr(root))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1371) node_tag_set(root, node, IDR_FREE, offset);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1372) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1373) for (tag = 0; tag < RADIX_TREE_MAX_TAGS; tag++)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1374) node_tag_clear(root, node, tag, offset);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1375)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1376) replace_slot(slot, NULL, node, -1, values);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1377) return node && delete_node(root, node);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1378) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1379)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1380) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1381) * radix_tree_iter_delete - delete the entry at this iterator position
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1382) * @root: radix tree root
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1383) * @iter: iterator state
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1384) * @slot: pointer to slot
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1385) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1386) * Delete the entry at the position currently pointed to by the iterator.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1387) * This may result in the current node being freed; if it is, the iterator
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1388) * is advanced so that it will not reference the freed memory. This
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1389) * function may be called without any locking if there are no other threads
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1390) * which can access this tree.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1391) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1392) void radix_tree_iter_delete(struct radix_tree_root *root,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1393) struct radix_tree_iter *iter, void __rcu **slot)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1394) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1395) if (__radix_tree_delete(root, iter->node, slot))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1396) iter->index = iter->next_index;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1397) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1398) EXPORT_SYMBOL(radix_tree_iter_delete);
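
/*
 * Example (illustrative sketch): remove every entry matching a predicate
 * while iterating.  'my_tree', 'my_lock' and should_drop() are
 * hypothetical; 'my_lock' excludes all other modifiers of the tree.
 *
 *	spin_lock(&my_lock);
 *	radix_tree_for_each_slot(slot, &my_tree, &iter, 0) {
 *		if (should_drop(rcu_dereference_protected(*slot,
 *				lockdep_is_held(&my_lock))))
 *			radix_tree_iter_delete(&my_tree, &iter, slot);
 *	}
 *	spin_unlock(&my_lock);
 */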
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1399)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1400) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1401) * radix_tree_delete_item - delete an item from a radix tree
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1402) * @root: radix tree root
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1403) * @index: index key
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1404) * @item: expected item
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1405) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1406) * Remove @item at @index from the radix tree rooted at @root.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1407) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1408) * Return: the deleted entry, or %NULL if it was not present
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1409) * or the entry at the given @index was not @item.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1410) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1411) void *radix_tree_delete_item(struct radix_tree_root *root,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1412) unsigned long index, void *item)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1413) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1414) struct radix_tree_node *node = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1415) void __rcu **slot = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1416) void *entry;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1417)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1418) entry = __radix_tree_lookup(root, index, &node, &slot);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1419) if (!slot)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1420) return NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1421) if (!entry && (!is_idr(root) || node_tag_get(root, node, IDR_FREE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1422) get_slot_offset(node, slot))))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1423) return NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1424)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1425) if (item && entry != item)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1426) return NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1427)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1428) __radix_tree_delete(root, node, slot);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1429)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1430) return entry;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1431) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1432) EXPORT_SYMBOL(radix_tree_delete_item);
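
/*
 * Example (illustrative sketch): cmpxchg-style removal, deleting the entry
 * at 'index' only if it is still 'item'.  A NULL return means somebody else
 * already removed or replaced it.
 *
 *	if (radix_tree_delete_item(&my_tree, index, item) == item)
 *		free_my_item(item);
 */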
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1433)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1434) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1435) * radix_tree_delete - delete an entry from a radix tree
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1436) * @root: radix tree root
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1437) * @index: index key
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1438) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1439) * Remove the entry at @index from the radix tree rooted at @root.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1440) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1441) * Return: The deleted entry, or %NULL if it was not present.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1442) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1443) void *radix_tree_delete(struct radix_tree_root *root, unsigned long index)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1444) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1445) return radix_tree_delete_item(root, index, NULL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1446) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1447) EXPORT_SYMBOL(radix_tree_delete);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1448)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1449) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1450) * radix_tree_tagged - test whether any items in the tree are tagged
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1451) * @root: radix tree root
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1452) * @tag: tag to test
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1453) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1454) int radix_tree_tagged(const struct radix_tree_root *root, unsigned int tag)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1455) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1456) return root_tag_get(root, tag);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1457) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1458) EXPORT_SYMBOL(radix_tree_tagged);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1459)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1460) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1461) * idr_preload - preload for idr_alloc()
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1462) * @gfp_mask: allocation mask to use for preloading
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1463) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1464) * Preallocate memory to use for the next call to idr_alloc(). This function
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1465) * returns with preemption disabled. It will be enabled by idr_preload_end().
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1466) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1467) void idr_preload(gfp_t gfp_mask)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1468) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1469) if (__radix_tree_preload(gfp_mask, IDR_PRELOAD_SIZE))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1470) local_lock(&radix_tree_preloads.lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1471) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1472) EXPORT_SYMBOL(idr_preload);
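
/*
 * Example (illustrative sketch): the canonical preload pattern, doing the
 * GFP_KERNEL allocation outside the spinlock and the ID allocation inside
 * it.  'my_idr', 'my_lock' and 'obj' are hypothetical.
 *
 *	idr_preload(GFP_KERNEL);
 *	spin_lock(&my_lock);
 *	id = idr_alloc(&my_idr, obj, 0, 0, GFP_NOWAIT);
 *	spin_unlock(&my_lock);
 *	idr_preload_end();
 */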
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1473)
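/*
 * idr_get_free - find the first IDR_FREE slot at or above iter->next_index
 *
 * Used by the IDR allocation paths.  Walks (and if necessary grows) the
 * tree looking for the lowest free index in [iter->next_index, max] and
 * returns a pointer to that slot, ERR_PTR(-ENOSPC) if every index up to
 * @max is in use, or ERR_PTR(-ENOMEM) if a node allocation fails.
 */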
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1474) void __rcu **idr_get_free(struct radix_tree_root *root,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1475) struct radix_tree_iter *iter, gfp_t gfp,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1476) unsigned long max)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1477) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1478) struct radix_tree_node *node = NULL, *child;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1479) void __rcu **slot = (void __rcu **)&root->xa_head;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1480) unsigned long maxindex, start = iter->next_index;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1481) unsigned int shift, offset = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1482)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1483) grow:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1484) shift = radix_tree_load_root(root, &child, &maxindex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1485) if (!radix_tree_tagged(root, IDR_FREE))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1486) start = max(start, maxindex + 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1487) if (start > max)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1488) return ERR_PTR(-ENOSPC);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1489)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1490) if (start > maxindex) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1491) int error = radix_tree_extend(root, gfp, start, shift);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1492) if (error < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1493) return ERR_PTR(error);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1494) shift = error;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1495) child = rcu_dereference_raw(root->xa_head);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1496) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1497) if (start == 0 && shift == 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1498) shift = RADIX_TREE_MAP_SHIFT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1499)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1500) while (shift) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1501) shift -= RADIX_TREE_MAP_SHIFT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1502) if (child == NULL) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1503) /* Have to add a child node. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1504) child = radix_tree_node_alloc(gfp, node, root, shift,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1505) offset, 0, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1506) if (!child)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1507) return ERR_PTR(-ENOMEM);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1508) all_tag_set(child, IDR_FREE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1509) rcu_assign_pointer(*slot, node_to_entry(child));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1510) if (node)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1511) node->count++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1512) } else if (!radix_tree_is_internal_node(child))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1513) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1514)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1515) node = entry_to_node(child);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1516) offset = radix_tree_descend(node, &child, start);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1517) if (!tag_get(node, IDR_FREE, offset)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1518) offset = radix_tree_find_next_bit(node, IDR_FREE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1519) offset + 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1520) start = next_index(start, node, offset);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1521) if (start > max || start == 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1522) return ERR_PTR(-ENOSPC);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1523) while (offset == RADIX_TREE_MAP_SIZE) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1524) offset = node->offset + 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1525) node = node->parent;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1526) if (!node)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1527) goto grow;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1528) shift = node->shift;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1529) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1530) child = rcu_dereference_raw(node->slots[offset]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1531) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1532) slot = &node->slots[offset];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1533) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1534)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1535) iter->index = start;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1536) if (node)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1537) iter->next_index = 1 + min(max, (start | node_maxindex(node)));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1538) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1539) iter->next_index = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1540) iter->node = node;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1541) set_iter_tags(iter, node, offset, IDR_FREE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1542)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1543) return slot;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1544) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1545)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1546) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1547) * idr_destroy - release all internal memory from an IDR
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1548) * @idr: idr handle
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1549) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1550) * After this function is called, the IDR is empty, and may be reused or
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1551) * the data structure containing it may be freed.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1552) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1553) * A typical clean-up sequence for objects stored in an idr tree will use
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1554) * idr_for_each() to free all objects, if necessary, then idr_destroy() to
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1555) * free the memory used to keep track of those objects.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1556) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1557) void idr_destroy(struct idr *idr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1558) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1559) struct radix_tree_node *node = rcu_dereference_raw(idr->idr_rt.xa_head);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1560) if (radix_tree_is_internal_node(node))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1561) radix_tree_free_nodes(node);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1562) idr->idr_rt.xa_head = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1563) root_tag_set(&idr->idr_rt, IDR_FREE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1564) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1565) EXPORT_SYMBOL(idr_destroy);
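
/*
 * Example (illustrative sketch): the clean-up sequence described above.
 * 'my_idr' and free_cb() are hypothetical; free_cb() frees one object.
 *
 *	static int free_cb(int id, void *p, void *data)
 *	{
 *		kfree(p);
 *		return 0;
 *	}
 *
 *	idr_for_each(&my_idr, free_cb, NULL);
 *	idr_destroy(&my_idr);
 */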
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1566)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1567) static void
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1568) radix_tree_node_ctor(void *arg)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1569) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1570) struct radix_tree_node *node = arg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1571)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1572) memset(node, 0, sizeof(*node));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1573) INIT_LIST_HEAD(&node->private_list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1574) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1575)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1576) static int radix_tree_cpu_dead(unsigned int cpu)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1577) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1578) struct radix_tree_preload *rtp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1579) struct radix_tree_node *node;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1580)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1581) /* Free per-cpu pool of preloaded nodes */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1582) rtp = &per_cpu(radix_tree_preloads, cpu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1583) while (rtp->nr) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1584) node = rtp->nodes;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1585) rtp->nodes = node->parent;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1586) kmem_cache_free(radix_tree_node_cachep, node);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1587) rtp->nr--;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1588) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1589) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1590) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1591)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1592) void __init radix_tree_init(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1593) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1594) int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1595)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1596) BUILD_BUG_ON(RADIX_TREE_MAX_TAGS + __GFP_BITS_SHIFT > 32);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1597) BUILD_BUG_ON(ROOT_IS_IDR & ~GFP_ZONEMASK);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1598) BUILD_BUG_ON(XA_CHUNK_SIZE > 255);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1599) radix_tree_node_cachep = kmem_cache_create("radix_tree_node",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1600) sizeof(struct radix_tree_node), 0,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1601) SLAB_PANIC | SLAB_RECLAIM_ACCOUNT,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1602) radix_tree_node_ctor);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1603) ret = cpuhp_setup_state_nocalls(CPUHP_RADIX_DEAD, "lib/radix:dead",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1604) NULL, radix_tree_cpu_dead);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1605) WARN_ON(ret < 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1606) }