// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2011 STRATO AG
 * written by Arne Jansen <sensille@gmx.net>
 */

#include <linux/slab.h>
#include "ulist.h"
#include "ctree.h"

/*
 * ulist is a generic data structure to hold a collection of unique u64
 * values. The only operations it supports are adding to the list and
 * enumerating it.
 * It is possible to store an auxiliary value along with the key.
 *
 * A sample usage for ulists is the enumeration of directed graphs without
 * visiting a node twice. The pseudo-code could look like this:
 *
 * ulist = ulist_alloc();
 * ulist_add(ulist, root);
 * ULIST_ITER_INIT(&uiter);
 *
 * while ((elem = ulist_next(ulist, &uiter))) {
 * 	for (all child nodes n in elem)
 *		ulist_add(ulist, n);
 *	do something useful with the node;
 * }
 * ulist_free(ulist);
 *
 * This assumes the graph nodes are addressable by u64. This stems from the
 * usage for tree enumeration in btrfs, where the logical addresses are
 * 64 bit.
 *
 * It is also useful for tree enumeration that could otherwise be done
 * elegantly with recursion, but is not possible due to kernel stack
 * limitations. The loop would be similar to the above.
 */
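
/*
 * A more concrete sketch of the same pattern with the real signatures and
 * error handling, assuming a context where GFP_KERNEL allocations are fine
 * (illustrative only; root_bytenr and next_bytenr() are placeholders for
 * whatever the caller actually enumerates):
 *
 *	struct ulist *ulist;
 *	struct ulist_iterator uiter;
 *	struct ulist_node *elem;
 *	int ret = 0;
 *
 *	ulist = ulist_alloc(GFP_KERNEL);
 *	if (!ulist)
 *		return -ENOMEM;
 *	ret = ulist_add(ulist, root_bytenr, 0, GFP_KERNEL);
 *	if (ret < 0)
 *		goto out;
 *	ULIST_ITER_INIT(&uiter);
 *	while ((elem = ulist_next(ulist, &uiter))) {
 *		ret = ulist_add(ulist, next_bytenr(elem->val), 0, GFP_KERNEL);
 *		if (ret < 0)
 *			goto out;
 *		do something useful with elem->val and elem->aux;
 *	}
 * out:
 *	ulist_free(ulist);
 *	return ret;
 */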

/**
 * ulist_init - freshly initialize a ulist
 * @ulist: the ulist to initialize
 *
 * Note: don't use this function to init an already used ulist, use
 * ulist_reinit instead.
 */
void ulist_init(struct ulist *ulist)
{
	INIT_LIST_HEAD(&ulist->nodes);
	ulist->root = RB_ROOT;
	ulist->nnodes = 0;
}

/**
 * ulist_release - free up additionally allocated memory for the ulist
 * @ulist: the ulist from which to free the additional memory
 *
 * This is useful in cases where the base 'struct ulist' has been statically
 * allocated.
 */
void ulist_release(struct ulist *ulist)
{
	struct ulist_node *node;
	struct ulist_node *next;

	list_for_each_entry_safe(node, next, &ulist->nodes, list) {
		kfree(node);
	}
	ulist->root = RB_ROOT;
	INIT_LIST_HEAD(&ulist->nodes);
}
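
/*
 * Minimal sketch of the embedded / on-stack usage that ulist_init() and
 * ulist_release() are meant for (illustrative only):
 *
 *	struct ulist tmp;
 *
 *	ulist_init(&tmp);
 *	... ulist_add(&tmp, val, aux, GFP_NOFS), ulist_next(&tmp, &uiter) ...
 *	ulist_release(&tmp);
 *
 * Only the dynamically allocated ulist_node elements are freed; the
 * 'struct ulist' itself is owned by the caller.
 */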

/**
 * ulist_reinit - prepare a ulist for reuse
 * @ulist: ulist to be reused
 *
 * Free up all additional memory allocated for the list elements and reinit
 * the ulist.
 */
void ulist_reinit(struct ulist *ulist)
{
	ulist_release(ulist);
	ulist_init(ulist);
}

/**
 * ulist_alloc - dynamically allocate a ulist
 * @gfp_mask: allocation flags for the base allocation
 *
 * The allocated ulist will be returned in an initialized state.
 */
struct ulist *ulist_alloc(gfp_t gfp_mask)
{
	struct ulist *ulist = kmalloc(sizeof(*ulist), gfp_mask);

	if (!ulist)
		return NULL;

	ulist_init(ulist);

	return ulist;
}

/**
 * ulist_free - free dynamically allocated ulist
 * @ulist: ulist to free
 *
 * It is not necessary to call ulist_release before.
 */
void ulist_free(struct ulist *ulist)
{
	if (!ulist)
		return;
	ulist_release(ulist);
	kfree(ulist);
}

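/*
 * The ulist keeps its elements in two structures at once: an rbtree keyed by
 * the u64 value, used to detect duplicates in ulist_add(), and a list used
 * for iteration in ulist_next().
 */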
static struct ulist_node *ulist_rbtree_search(struct ulist *ulist, u64 val)
{
	struct rb_node *n = ulist->root.rb_node;
	struct ulist_node *u = NULL;

	while (n) {
		u = rb_entry(n, struct ulist_node, rb_node);
		if (u->val < val)
			n = n->rb_right;
		else if (u->val > val)
			n = n->rb_left;
		else
			return u;
	}
	return NULL;
}

static void ulist_rbtree_erase(struct ulist *ulist, struct ulist_node *node)
{
	rb_erase(&node->rb_node, &ulist->root);
	list_del(&node->list);
	kfree(node);
	BUG_ON(ulist->nnodes == 0);
	ulist->nnodes--;
}

static int ulist_rbtree_insert(struct ulist *ulist, struct ulist_node *ins)
{
	struct rb_node **p = &ulist->root.rb_node;
	struct rb_node *parent = NULL;
	struct ulist_node *cur = NULL;

	while (*p) {
		parent = *p;
		cur = rb_entry(parent, struct ulist_node, rb_node);

		if (cur->val < ins->val)
			p = &(*p)->rb_right;
		else if (cur->val > ins->val)
			p = &(*p)->rb_left;
		else
			return -EEXIST;
	}
	rb_link_node(&ins->rb_node, parent, p);
	rb_insert_color(&ins->rb_node, &ulist->root);
	return 0;
}

/**
 * ulist_add - add an element to the ulist
 * @ulist: ulist to add the element to
 * @val: value to add to ulist
 * @aux: auxiliary value to store along with val
 * @gfp_mask: flags to use for allocation
 *
 * Note: locking must be provided by the caller. In case of rwlocks, write
 * locking is needed.
 *
 * Add an element to a ulist. The @val will only be added if it doesn't
 * already exist. If it is added, the auxiliary value @aux is stored along with
 * it. In case @val already exists in the ulist, @aux is ignored, even if
 * it differs from the already stored value.
 *
 * ulist_add returns 0 if @val already exists in ulist and 1 if @val has been
 * inserted.
 * In case of allocation failure -ENOMEM is returned and the ulist stays
 * unaltered.
 */
int ulist_add(struct ulist *ulist, u64 val, u64 aux, gfp_t gfp_mask)
{
	return ulist_add_merge(ulist, val, aux, NULL, gfp_mask);
}

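/*
 * ulist_add_merge - add an element or look up the aux of an existing one
 * @ulist: ulist to add the element to
 * @val: value to add to ulist
 * @aux: auxiliary value to store if @val is newly inserted
 * @old_aux: if non-NULL and @val already exists, receives the aux value
 *           already stored for @val
 * @gfp_mask: flags to use for allocation
 *
 * Behaves like ulist_add() and uses the same return values, except that the
 * aux value of an already existing @val can be retrieved through @old_aux.
 */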
int ulist_add_merge(struct ulist *ulist, u64 val, u64 aux,
		    u64 *old_aux, gfp_t gfp_mask)
{
	int ret;
	struct ulist_node *node;

	node = ulist_rbtree_search(ulist, val);
	if (node) {
		if (old_aux)
			*old_aux = node->aux;
		return 0;
	}
	node = kmalloc(sizeof(*node), gfp_mask);
	if (!node)
		return -ENOMEM;

	node->val = val;
	node->aux = aux;

	ret = ulist_rbtree_insert(ulist, node);
	ASSERT(!ret);
	list_add_tail(&node->list, &ulist->nodes);
	ulist->nnodes++;

	return 1;
}
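
/*
 * Sketch of the add-or-look-up pattern that ulist_add_merge() enables
 * (illustrative only; bytenr and new_flags are placeholders):
 *
 *	u64 old_flags = 0;
 *	int ret;
 *
 *	ret = ulist_add_merge(ulist, bytenr, new_flags, &old_flags, GFP_NOFS);
 *	if (ret < 0)
 *		return ret;
 *	if (ret == 0)
 *		bytenr was already present, old_flags holds its stored aux;
 *	else
 *		bytenr was newly inserted with aux set to new_flags;
 */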

/*
 * ulist_del - delete one node from ulist
 * @ulist: ulist to remove node from
 * @val: value to delete
 * @aux: aux to delete
 *
 * The deletion will only be done when *BOTH* val and aux match.
 * Return 0 for successful delete.
 * Return > 0 for not found.
 */
int ulist_del(struct ulist *ulist, u64 val, u64 aux)
{
	struct ulist_node *node;

	node = ulist_rbtree_search(ulist, val);
	/* Not found */
	if (!node)
		return 1;

	if (node->aux != aux)
		return 1;

	/* Found and delete */
	ulist_rbtree_erase(ulist, node);
	return 0;
}
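
/*
 * Deletion sketch (illustrative only): because both the value and the aux
 * must match, the caller has to pass back the same aux it stored earlier:
 *
 *	if (ulist_del(ulist, bytenr, stored_aux))
 *		the entry was not found or its aux differed;
 */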

/**
 * ulist_next - iterate ulist
 * @ulist: ulist to iterate
 * @uiter: iterator variable, initialized with ULIST_ITER_INIT(&iterator)
 *
 * Note: locking must be provided by the caller. In case of rwlocks only read
 * locking is needed.
 *
 * This function is used to iterate a ulist.
 * It returns the next element from the ulist or %NULL when the
 * end is reached. No guarantee is made with respect to the order in which
 * the elements are returned. They might neither be returned in order of
 * addition nor in ascending order.
 * It is allowed to call ulist_add during an enumeration. Newly added items
 * are guaranteed to show up in the running enumeration.
 */
struct ulist_node *ulist_next(struct ulist *ulist, struct ulist_iterator *uiter)
{
	struct ulist_node *node;

	if (list_empty(&ulist->nodes))
		return NULL;
	if (uiter->cur_list && uiter->cur_list->next == &ulist->nodes)
		return NULL;
	if (uiter->cur_list) {
		uiter->cur_list = uiter->cur_list->next;
	} else {
		uiter->cur_list = ulist->nodes.next;
	}
	node = list_entry(uiter->cur_list, struct ulist_node, list);
	return node;
}
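
/*
 * Note on iterator reuse (a sketch, assuming ULIST_ITER_INIT() simply resets
 * the iterator as declared in ulist.h): a second enumeration of the same
 * ulist needs a fresh ULIST_ITER_INIT() first, otherwise ulist_next() keeps
 * returning NULL once the previous pass has reached the end:
 *
 *	ULIST_ITER_INIT(&uiter);
 *	while ((elem = ulist_next(ulist, &uiter)))
 *		first pass;
 *	ULIST_ITER_INIT(&uiter);
 *	while ((elem = ulist_next(ulist, &uiter)))
 *		second pass;
 */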