// SPDX-License-Identifier: GPL-2.0-only
#include <linux/bitmap.h>
#include <linux/bug.h>
#include <linux/export.h>
#include <linux/idr.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/xarray.h>

/**
 * idr_alloc_u32() - Allocate an ID.
 * @idr: IDR handle.
 * @ptr: Pointer to be associated with the new ID.
 * @nextid: Pointer to an ID.
 * @max: The maximum ID to allocate (inclusive).
 * @gfp: Memory allocation flags.
 *
 * Allocates an unused ID in the range specified by @nextid and @max.
 * Note that @max is inclusive whereas the @end parameter to idr_alloc()
 * is exclusive. The new ID is assigned to @nextid before the pointer
 * is inserted into the IDR, so if @nextid points into the object pointed
 * to by @ptr, a concurrent lookup will not find an uninitialised ID.
 *
 * The caller should provide their own locking to ensure that two
 * concurrent modifications to the IDR are not possible. Read-only
 * accesses to the IDR may be done under the RCU read lock or may
 * exclude simultaneous writers.
 *
 * Return: 0 if an ID was allocated, -ENOMEM if memory allocation failed,
 * or -ENOSPC if no free IDs could be found. If an error occurred,
 * @nextid is unchanged.
 */
int idr_alloc_u32(struct idr *idr, void *ptr, u32 *nextid,
			unsigned long max, gfp_t gfp)
{
	struct radix_tree_iter iter;
	void __rcu **slot;
	unsigned int base = idr->idr_base;
	unsigned int id = *nextid;

	if (WARN_ON_ONCE(!(idr->idr_rt.xa_flags & ROOT_IS_IDR)))
		idr->idr_rt.xa_flags |= IDR_RT_MARKER;

	id = (id < base) ? 0 : id - base;
	radix_tree_iter_init(&iter, id);
	slot = idr_get_free(&idr->idr_rt, &iter, gfp, max - base);
	if (IS_ERR(slot))
		return PTR_ERR(slot);

	*nextid = iter.index + base;
	/* there is a memory barrier inside radix_tree_iter_replace() */
	radix_tree_iter_replace(&idr->idr_rt, &iter, slot, ptr);
	radix_tree_iter_tag_clear(&idr->idr_rt, &iter, IDR_FREE);

	return 0;
}
EXPORT_SYMBOL_GPL(idr_alloc_u32);
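
/*
 * Usage sketch (editor's illustration; struct my_obj and my_lock are
 * hypothetical, not part of the kernel): because idr_alloc_u32() writes the
 * new ID through @nextid before inserting @ptr, the ID field inside the
 * object itself can be passed as @nextid, so a concurrent RCU lookup never
 * observes an object with an uninitialised ID.
 *
 *	struct my_obj {
 *		u32 id;
 *		...
 *	};
 *
 *	static int my_obj_install(struct idr *idr, struct my_obj *obj)
 *	{
 *		int err;
 *
 *		obj->id = 0;
 *		spin_lock(&my_lock);
 *		err = idr_alloc_u32(idr, obj, &obj->id, UINT_MAX, GFP_ATOMIC);
 *		spin_unlock(&my_lock);
 *		return err;
 *	}
 */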

/**
 * idr_alloc() - Allocate an ID.
 * @idr: IDR handle.
 * @ptr: Pointer to be associated with the new ID.
 * @start: The minimum ID (inclusive).
 * @end: The maximum ID (exclusive).
 * @gfp: Memory allocation flags.
 *
 * Allocates an unused ID in the range specified by @start and @end. If
 * @end is <= 0, it is treated as one larger than %INT_MAX. This allows
 * callers to use @start + N as @end as long as N is within integer range.
 *
 * The caller should provide their own locking to ensure that two
 * concurrent modifications to the IDR are not possible. Read-only
 * accesses to the IDR may be done under the RCU read lock or may
 * exclude simultaneous writers.
 *
 * Return: The newly allocated ID, -ENOMEM if memory allocation failed,
 * or -ENOSPC if no free IDs could be found.
 */
int idr_alloc(struct idr *idr, void *ptr, int start, int end, gfp_t gfp)
{
	u32 id = start;
	int ret;

	if (WARN_ON_ONCE(start < 0))
		return -EINVAL;

	ret = idr_alloc_u32(idr, ptr, &id, end > 0 ? end - 1 : INT_MAX, gfp);
	if (ret)
		return ret;

	return id;
}
EXPORT_SYMBOL_GPL(idr_alloc);
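
/*
 * Usage sketch (editor's illustration; my_idr, my_lock and obj are
 * hypothetical): a common pattern is to preallocate memory with
 * idr_preload() so that the allocation under the spinlock can use
 * GFP_NOWAIT. An @end of 0 means "no upper bound below INT_MAX".
 *
 *	int id;
 *
 *	idr_preload(GFP_KERNEL);
 *	spin_lock(&my_lock);
 *	id = idr_alloc(&my_idr, obj, 1, 0, GFP_NOWAIT);
 *	spin_unlock(&my_lock);
 *	idr_preload_end();
 *	if (id < 0)
 *		return id;
 *	obj->id = id;
 */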

/**
 * idr_alloc_cyclic() - Allocate an ID cyclically.
 * @idr: IDR handle.
 * @ptr: Pointer to be associated with the new ID.
 * @start: The minimum ID (inclusive).
 * @end: The maximum ID (exclusive).
 * @gfp: Memory allocation flags.
 *
 * Allocates an unused ID in the range specified by @start and @end. If
 * @end is <= 0, it is treated as one larger than %INT_MAX. This allows
 * callers to use @start + N as @end as long as N is within integer range.
 * The search for an unused ID will start at the last ID allocated and will
 * wrap around to @start if no free IDs are found before reaching @end.
 *
 * The caller should provide their own locking to ensure that two
 * concurrent modifications to the IDR are not possible. Read-only
 * accesses to the IDR may be done under the RCU read lock or may
 * exclude simultaneous writers.
 *
 * Return: The newly allocated ID, -ENOMEM if memory allocation failed,
 * or -ENOSPC if no free IDs could be found.
 */
int idr_alloc_cyclic(struct idr *idr, void *ptr, int start, int end, gfp_t gfp)
{
	u32 id = idr->idr_next;
	int err, max = end > 0 ? end - 1 : INT_MAX;

	if ((int)id < start)
		id = start;

	err = idr_alloc_u32(idr, ptr, &id, max, gfp);
	if ((err == -ENOSPC) && (id > start)) {
		id = start;
		err = idr_alloc_u32(idr, ptr, &id, max, gfp);
	}
	if (err)
		return err;

	idr->idr_next = id + 1;
	return id;
}
EXPORT_SYMBOL(idr_alloc_cyclic);
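
/*
 * Usage sketch (editor's illustration; my_idr, my_lock and obj are
 * hypothetical): cyclic allocation is useful when IDs should not be reused
 * immediately, e.g. for handles exposed to userspace. The search resumes
 * after the last allocated ID and wraps back to @start on -ENOSPC.
 *
 *	spin_lock(&my_lock);
 *	id = idr_alloc_cyclic(&my_idr, obj, 1, 0, GFP_ATOMIC);
 *	spin_unlock(&my_lock);
 *	if (id < 0)
 *		return id;
 */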

/**
 * idr_remove() - Remove an ID from the IDR.
 * @idr: IDR handle.
 * @id: Pointer ID.
 *
 * Removes this ID from the IDR. If the ID was not previously in the IDR,
 * this function returns %NULL.
 *
 * Since this function modifies the IDR, the caller should provide their
 * own locking to ensure that concurrent modification of the same IDR is
 * not possible.
 *
 * Return: The pointer formerly associated with this ID.
 */
void *idr_remove(struct idr *idr, unsigned long id)
{
	return radix_tree_delete_item(&idr->idr_rt, id - idr->idr_base, NULL);
}
EXPORT_SYMBOL_GPL(idr_remove);

/**
 * idr_find() - Return pointer for given ID.
 * @idr: IDR handle.
 * @id: Pointer ID.
 *
 * Looks up the pointer associated with this ID. A %NULL pointer may
 * indicate that @id is not allocated or that the %NULL pointer was
 * associated with this ID.
 *
 * This function can be called under rcu_read_lock(), provided that the
 * lifetimes of the leaf pointers are correctly managed.
 *
 * Return: The pointer associated with this ID.
 */
void *idr_find(const struct idr *idr, unsigned long id)
{
	return radix_tree_lookup(&idr->idr_rt, id - idr->idr_base);
}
EXPORT_SYMBOL_GPL(idr_find);
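
/*
 * Lookup sketch (editor's illustration; my_idr and the refcounted object are
 * hypothetical): as noted above, lookups may run under rcu_read_lock()
 * provided the objects stored in the IDR are themselves freed only after an
 * RCU grace period, e.g. by taking a reference while still inside the
 * read-side critical section.
 *
 *	rcu_read_lock();
 *	obj = idr_find(&my_idr, id);
 *	if (obj && !kref_get_unless_zero(&obj->ref))
 *		obj = NULL;
 *	rcu_read_unlock();
 */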

/**
 * idr_for_each() - Iterate through all stored pointers.
 * @idr: IDR handle.
 * @fn: Function to be called for each pointer.
 * @data: Data passed to callback function.
 *
 * The callback function will be called for each entry in @idr, passing
 * the ID, the entry and @data.
 *
 * If @fn returns anything other than %0, the iteration stops and that
 * value is returned from this function.
 *
 * idr_for_each() can be called concurrently with idr_alloc() and
 * idr_remove() if protected by RCU. Newly added entries may not be
 * seen and deleted entries may be seen, but adding and removing entries
 * will not cause other entries to be skipped, nor spurious ones to be seen.
 */
int idr_for_each(const struct idr *idr,
		int (*fn)(int id, void *p, void *data), void *data)
{
	struct radix_tree_iter iter;
	void __rcu **slot;
	int base = idr->idr_base;

	radix_tree_for_each_slot(slot, &idr->idr_rt, &iter, 0) {
		int ret;
		unsigned long id = iter.index + base;

		if (WARN_ON_ONCE(id > INT_MAX))
			break;
		ret = fn(id, rcu_dereference_raw(*slot), data);
		if (ret)
			return ret;
	}

	return 0;
}
EXPORT_SYMBOL(idr_for_each);
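
/*
 * Callback sketch (editor's illustration; my_show, my_idr and the seq_file
 * usage are hypothetical): the callback receives each ID and entry, and
 * returning a non-zero value stops the walk and propagates that value to
 * the idr_for_each() caller.
 *
 *	static int my_show(int id, void *p, void *data)
 *	{
 *		struct seq_file *s = data;
 *
 *		seq_printf(s, "%d: %p\n", id, p);
 *		return 0;
 *	}
 *
 *	idr_for_each(&my_idr, my_show, s);
 */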

/**
 * idr_get_next_ul() - Find next populated entry.
 * @idr: IDR handle.
 * @nextid: Pointer to an ID.
 *
 * Returns the next populated entry in the tree with an ID greater than
 * or equal to the value pointed to by @nextid. On exit, @nextid is updated
 * to the ID of the found value. To use in a loop, the value pointed to by
 * nextid must be incremented by the user.
 */
void *idr_get_next_ul(struct idr *idr, unsigned long *nextid)
{
	struct radix_tree_iter iter;
	void __rcu **slot;
	void *entry = NULL;
	unsigned long base = idr->idr_base;
	unsigned long id = *nextid;

	id = (id < base) ? 0 : id - base;
	radix_tree_for_each_slot(slot, &idr->idr_rt, &iter, id) {
		entry = rcu_dereference_raw(*slot);
		if (!entry)
			continue;
		if (!xa_is_internal(entry))
			break;
		if (slot != &idr->idr_rt.xa_head && !xa_is_retry(entry))
			break;
		slot = radix_tree_iter_retry(&iter);
	}
	if (!slot)
		return NULL;

	*nextid = iter.index + base;
	return entry;
}
EXPORT_SYMBOL(idr_get_next_ul);
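
/*
 * Iteration sketch (editor's illustration; my_idr and process() are
 * hypothetical placeholders): because idr_get_next_ul() returns entries at
 * IDs greater than or equal to *nextid, the caller must advance the cursor
 * after each hit, as the kerneldoc above notes.
 *
 *	unsigned long id = 0;
 *	void *entry;
 *
 *	while ((entry = idr_get_next_ul(&my_idr, &id)) != NULL) {
 *		process(entry, id);
 *		id++;
 *	}
 */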

/**
 * idr_get_next() - Find next populated entry.
 * @idr: IDR handle.
 * @nextid: Pointer to an ID.
 *
 * Returns the next populated entry in the tree with an ID greater than
 * or equal to the value pointed to by @nextid. On exit, @nextid is updated
 * to the ID of the found value. To use in a loop, the value pointed to by
 * nextid must be incremented by the user.
 */
void *idr_get_next(struct idr *idr, int *nextid)
{
	unsigned long id = *nextid;
	void *entry = idr_get_next_ul(idr, &id);

	if (WARN_ON_ONCE(id > INT_MAX))
		return NULL;
	*nextid = id;
	return entry;
}
EXPORT_SYMBOL(idr_get_next);

/**
 * idr_replace() - replace pointer for given ID.
 * @idr: IDR handle.
 * @ptr: New pointer to associate with the ID.
 * @id: ID to change.
 *
 * Replace the pointer registered with an ID and return the old value.
 * This function can be called under the RCU read lock concurrently with
 * idr_alloc() and idr_remove() (as long as the ID being removed is not
 * the one being replaced!).
 *
 * Returns: the old value on success. %-ENOENT indicates that @id was not
 * found. %-EINVAL indicates that @ptr was not valid.
 */
void *idr_replace(struct idr *idr, void *ptr, unsigned long id)
{
	struct radix_tree_node *node;
	void __rcu **slot = NULL;
	void *entry;

	id -= idr->idr_base;

	entry = __radix_tree_lookup(&idr->idr_rt, id, &node, &slot);
	if (!slot || radix_tree_tag_get(&idr->idr_rt, id, IDR_FREE))
		return ERR_PTR(-ENOENT);

	__radix_tree_replace(&idr->idr_rt, node, slot, ptr);

	return entry;
}
EXPORT_SYMBOL(idr_replace);
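
/*
 * Replacement sketch (editor's illustration; my_idr and obj are
 * hypothetical): a two-phase setup can reserve an ID early by allocating it
 * with a %NULL pointer and publish the real object later with idr_replace(),
 * keeping the same ID throughout.
 *
 *	id = idr_alloc(&my_idr, NULL, 1, 0, GFP_KERNEL);
 *	if (id < 0)
 *		return id;
 *	...
 *	old = idr_replace(&my_idr, obj, id);
 *	if (IS_ERR(old))
 *		return PTR_ERR(old);
 */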

/**
 * DOC: IDA description
 *
 * The IDA is an ID allocator which does not provide the ability to
 * associate an ID with a pointer. As such, it only needs to store one
 * bit per ID, and so is more space efficient than an IDR. To use an IDA,
 * define it using DEFINE_IDA() (or embed a &struct ida in a data structure,
 * then initialise it using ida_init()). To allocate a new ID, call
 * ida_alloc(), ida_alloc_min(), ida_alloc_max() or ida_alloc_range().
 * To free an ID, call ida_free().
 *
 * ida_destroy() can be used to dispose of an IDA without needing to
 * free the individual IDs in it. You can use ida_is_empty() to find
 * out whether the IDA has any IDs currently allocated.
 *
 * The IDA handles its own locking. It is safe to call any of the IDA
 * functions without synchronisation in your code.
 *
 * IDs are currently limited to the range [0-INT_MAX]. If this is an awkward
 * limitation, it should be quite straightforward to raise the maximum.
 */
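
/*
 * IDA usage sketch (editor's illustration; my_ida and the minor-number
 * helpers are hypothetical): no external locking is needed, as the
 * description above notes.
 *
 *	static DEFINE_IDA(my_ida);
 *
 *	static int my_get_minor(void)
 *	{
 *		return ida_alloc_range(&my_ida, 0, 255, GFP_KERNEL);
 *	}
 *
 *	static void my_put_minor(int minor)
 *	{
 *		ida_free(&my_ida, minor);
 *	}
 */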

/*
 * Developer's notes:
 *
 * The IDA uses the functionality provided by the XArray to store bitmaps in
 * each entry. The XA_FREE_MARK is only cleared when all bits in the bitmap
 * have been set.
 *
 * I considered telling the XArray that each slot is an order-10 node
 * and indexing by bit number, but the XArray can't allow a single multi-index
 * entry in the head, which would significantly increase memory consumption
 * for the IDA. So instead we divide the index by the number of bits in the
 * leaf bitmap before doing a radix tree lookup.
 *
 * As an optimisation, if there are only a few low bits set in any given
 * leaf, instead of allocating a 128-byte bitmap, we store the bits
 * as a value entry. Value entries never have the XA_FREE_MARK cleared
 * because we can always convert them into a bitmap entry.
 *
 * It would be possible to optimise further; once we've run out of a
 * single 128-byte bitmap, we currently switch to a 576-byte node, put
 * the 128-byte bitmap in the first entry and then start allocating extra
 * 128-byte entries. We could instead use the 512 bytes of the node's
 * data as a bitmap before moving to that scheme. I do not believe this
 * is a worthwhile optimisation; Rasmus Villemoes surveyed the current
 * users of the IDA and almost none of them use more than 1024 entries.
 * Those that do need more than 1024 also need more than the 8192 IDs
 * that the 512 bytes would provide.
 *
 * The IDA always uses a lock to alloc/free. If we add a 'test_bit'
 * equivalent, it will still need locking. Going to RCU lookup would require
 * using RCU to free bitmaps, and that's not trivial without embedding an
 * RCU head in the bitmap, which adds a 2-pointer overhead to each 128-byte
 * bitmap, which is excessive.
 */
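
/*
 * Index arithmetic sketch (editor's illustration): as described above, an ID
 * maps to an XArray index and a bit within that 128-byte (1024-bit) leaf,
 * e.g. ID 2050 lives at index 2050 / 1024 = 2, bit 2050 % 1024 = 2.
 *
 *	index = id / IDA_BITMAP_BITS;
 *	bit   = id % IDA_BITMAP_BITS;
 */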

/**
 * ida_alloc_range() - Allocate an unused ID.
 * @ida: IDA handle.
 * @min: Lowest ID to allocate.
 * @max: Highest ID to allocate.
 * @gfp: Memory allocation flags.
 *
 * Allocate an ID between @min and @max, inclusive. The allocated ID will
 * not exceed %INT_MAX, even if @max is larger.
 *
 * Context: Any context. It is safe to call this function without
 * locking in your code.
 * Return: The allocated ID, or %-ENOMEM if memory could not be allocated,
 * or %-ENOSPC if there are no free IDs.
 */
int ida_alloc_range(struct ida *ida, unsigned int min, unsigned int max,
			gfp_t gfp)
{
	XA_STATE(xas, &ida->xa, min / IDA_BITMAP_BITS);
	unsigned bit = min % IDA_BITMAP_BITS;
	unsigned long flags;
	struct ida_bitmap *bitmap, *alloc = NULL;

	if ((int)min < 0)
		return -ENOSPC;

	if ((int)max < 0)
		max = INT_MAX;

retry:
	xas_lock_irqsave(&xas, flags);
next:
	bitmap = xas_find_marked(&xas, max / IDA_BITMAP_BITS, XA_FREE_MARK);
	if (xas.xa_index > min / IDA_BITMAP_BITS)
		bit = 0;
	if (xas.xa_index * IDA_BITMAP_BITS + bit > max)
		goto nospc;

	if (xa_is_value(bitmap)) {
		unsigned long tmp = xa_to_value(bitmap);

		if (bit < BITS_PER_XA_VALUE) {
			bit = find_next_zero_bit(&tmp, BITS_PER_XA_VALUE, bit);
			if (xas.xa_index * IDA_BITMAP_BITS + bit > max)
				goto nospc;
			if (bit < BITS_PER_XA_VALUE) {
				tmp |= 1UL << bit;
				xas_store(&xas, xa_mk_value(tmp));
				goto out;
			}
		}
		bitmap = alloc;
		if (!bitmap)
			bitmap = kzalloc(sizeof(*bitmap), GFP_NOWAIT);
		if (!bitmap)
			goto alloc;
		bitmap->bitmap[0] = tmp;
		xas_store(&xas, bitmap);
		if (xas_error(&xas)) {
			bitmap->bitmap[0] = 0;
			goto out;
		}
	}

	if (bitmap) {
		bit = find_next_zero_bit(bitmap->bitmap, IDA_BITMAP_BITS, bit);
		if (xas.xa_index * IDA_BITMAP_BITS + bit > max)
			goto nospc;
		if (bit == IDA_BITMAP_BITS)
			goto next;

		__set_bit(bit, bitmap->bitmap);
		if (bitmap_full(bitmap->bitmap, IDA_BITMAP_BITS))
			xas_clear_mark(&xas, XA_FREE_MARK);
	} else {
		if (bit < BITS_PER_XA_VALUE) {
			bitmap = xa_mk_value(1UL << bit);
		} else {
			bitmap = alloc;
			if (!bitmap)
				bitmap = kzalloc(sizeof(*bitmap), GFP_NOWAIT);
			if (!bitmap)
				goto alloc;
			__set_bit(bit, bitmap->bitmap);
		}
		xas_store(&xas, bitmap);
	}
out:
	xas_unlock_irqrestore(&xas, flags);
	if (xas_nomem(&xas, gfp)) {
		xas.xa_index = min / IDA_BITMAP_BITS;
		bit = min % IDA_BITMAP_BITS;
		goto retry;
	}
	if (bitmap != alloc)
		kfree(alloc);
	if (xas_error(&xas))
		return xas_error(&xas);
	return xas.xa_index * IDA_BITMAP_BITS + bit;
alloc:
	xas_unlock_irqrestore(&xas, flags);
	alloc = kzalloc(sizeof(*bitmap), gfp);
	if (!alloc)
		return -ENOMEM;
	xas_set(&xas, min / IDA_BITMAP_BITS);
	bit = min % IDA_BITMAP_BITS;
	goto retry;
nospc:
	xas_unlock_irqrestore(&xas, flags);
	kfree(alloc);
	return -ENOSPC;
}
EXPORT_SYMBOL(ida_alloc_range);
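
/*
 * Range sketch (editor's illustration; my_ida is hypothetical): @min and
 * @max are both inclusive, and the convenience wrappers named in the DOC
 * section above simply fix one or both bounds.
 *
 *	id = ida_alloc_range(&my_ida, 16, 31, GFP_KERNEL);	// 16..31
 *	id = ida_alloc_max(&my_ida, 31, GFP_KERNEL);		// 0..31
 *	id = ida_alloc(&my_ida, GFP_KERNEL);			// 0..INT_MAX
 */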

/**
 * ida_free() - Release an allocated ID.
 * @ida: IDA handle.
 * @id: Previously allocated ID.
 *
 * Context: Any context. It is safe to call this function without
 * locking in your code.
 */
void ida_free(struct ida *ida, unsigned int id)
{
	XA_STATE(xas, &ida->xa, id / IDA_BITMAP_BITS);
	unsigned bit = id % IDA_BITMAP_BITS;
	struct ida_bitmap *bitmap;
	unsigned long flags;

	BUG_ON((int)id < 0);

	xas_lock_irqsave(&xas, flags);
	bitmap = xas_load(&xas);

	if (xa_is_value(bitmap)) {
		unsigned long v = xa_to_value(bitmap);
		if (bit >= BITS_PER_XA_VALUE)
			goto err;
		if (!(v & (1UL << bit)))
			goto err;
		v &= ~(1UL << bit);
		if (!v)
			goto delete;
		xas_store(&xas, xa_mk_value(v));
	} else {
		if (!test_bit(bit, bitmap->bitmap))
			goto err;
		__clear_bit(bit, bitmap->bitmap);
		xas_set_mark(&xas, XA_FREE_MARK);
		if (bitmap_empty(bitmap->bitmap, IDA_BITMAP_BITS)) {
			kfree(bitmap);
delete:
			xas_store(&xas, NULL);
		}
	}
	xas_unlock_irqrestore(&xas, flags);
	return;
err:
	xas_unlock_irqrestore(&xas, flags);
	WARN(1, "ida_free called for id=%d which is not allocated.\n", id);
}
EXPORT_SYMBOL(ida_free);

/**
 * ida_destroy() - Free all IDs.
 * @ida: IDA handle.
 *
 * Calling this function frees all IDs and releases all resources used
 * by an IDA. When this call returns, the IDA is empty and can be reused
 * or freed. If the IDA is already empty, there is no need to call this
 * function.
 *
 * Context: Any context. It is safe to call this function without
 * locking in your code.
 */
void ida_destroy(struct ida *ida)
{
	XA_STATE(xas, &ida->xa, 0);
	struct ida_bitmap *bitmap;
	unsigned long flags;

	xas_lock_irqsave(&xas, flags);
	xas_for_each(&xas, bitmap, ULONG_MAX) {
		if (!xa_is_value(bitmap))
			kfree(bitmap);
		xas_store(&xas, NULL);
	}
	xas_unlock_irqrestore(&xas, flags);
}
EXPORT_SYMBOL(ida_destroy);
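
/*
 * Teardown sketch (editor's illustration; the dev structure is
 * hypothetical): an IDA embedded in a larger structure is initialised with
 * ida_init() and can be disposed of wholesale with ida_destroy() when the
 * structure goes away, without freeing each ID individually.
 *
 *	ida_init(&dev->ida);
 *	...
 *	ida_destroy(&dev->ida);
 */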

#ifndef __KERNEL__
extern void xa_dump_index(unsigned long index, unsigned int shift);
#define IDA_CHUNK_SHIFT		ilog2(IDA_BITMAP_BITS)

static void ida_dump_entry(void *entry, unsigned long index)
{
	unsigned long i;

	if (!entry)
		return;

	if (xa_is_node(entry)) {
		struct xa_node *node = xa_to_node(entry);
		unsigned int shift = node->shift + IDA_CHUNK_SHIFT +
			XA_CHUNK_SHIFT;

		xa_dump_index(index * IDA_BITMAP_BITS, shift);
		xa_dump_node(node);
		for (i = 0; i < XA_CHUNK_SIZE; i++)
			ida_dump_entry(node->slots[i],
					index | (i << node->shift));
	} else if (xa_is_value(entry)) {
		xa_dump_index(index * IDA_BITMAP_BITS, ilog2(BITS_PER_LONG));
		pr_cont("value: data %lx [%px]\n", xa_to_value(entry), entry);
	} else {
		struct ida_bitmap *bitmap = entry;

		xa_dump_index(index * IDA_BITMAP_BITS, IDA_CHUNK_SHIFT);
		pr_cont("bitmap: %p data", bitmap);
		for (i = 0; i < IDA_BITMAP_LONGS; i++)
			pr_cont(" %lx", bitmap->bitmap[i]);
		pr_cont("\n");
	}
}

static void ida_dump(struct ida *ida)
{
	struct xarray *xa = &ida->xa;
	pr_debug("ida: %p node %p free %d\n", ida, xa->xa_head,
			xa->xa_flags >> ROOT_TAG_SHIFT);
	ida_dump_entry(xa->xa_head, 0);
}
#endif