Orange Pi5 kernel

Deprecated Linux kernel 5.10.110 for OrangePi 5/5B/5+ boards

/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
  Red Black Trees
  (C) 1999  Andrea Arcangeli <andrea@suse.de>

  linux/include/linux/rbtree.h

  To use rbtrees you'll have to implement your own insert and search cores.
  This avoids callbacks and the dramatic performance penalty they would
  incur.  It's not the cleanest approach, but in C (unlike C++) it is how
  you get both performance and genericity.  (An illustrative insert/search
  sketch is included near rb_link_node() below.)

  See Documentation/core-api/rbtree.rst for documentation and samples.
*/

#ifndef __TOOLS_LINUX_PERF_RBTREE_H
#define __TOOLS_LINUX_PERF_RBTREE_H

#include <linux/kernel.h>
#include <linux/stddef.h>

struct rb_node {
	unsigned long  __rb_parent_color;
	struct rb_node *rb_right;
	struct rb_node *rb_left;
} __attribute__((aligned(sizeof(long))));
    /* The alignment might seem pointless, but allegedly CRIS needs it */

struct rb_root {
	struct rb_node *rb_node;
};

#define rb_parent(r)   ((struct rb_node *)((r)->__rb_parent_color & ~3))

#define RB_ROOT	(struct rb_root) { NULL, }
#define	rb_entry(ptr, type, member) container_of(ptr, type, member)

#define RB_EMPTY_ROOT(root)  (READ_ONCE((root)->rb_node) == NULL)

/* 'empty' nodes are nodes that are known not to be inserted in an rbtree */
#define RB_EMPTY_NODE(node)  \
	((node)->__rb_parent_color == (unsigned long)(node))
#define RB_CLEAR_NODE(node)  \
	((node)->__rb_parent_color = (unsigned long)(node))
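
/*
 * Illustrative sketch (not part of the original header, hence #if 0): a
 * common pattern is to RB_CLEAR_NODE() a node when its containing object
 * is initialized, and later test RB_EMPTY_NODE() to decide whether it
 * still needs to be unlinked.  This only works if the node was cleared
 * before any insertion; example_erase_if_linked() is a hypothetical helper.
 */
#if 0
static inline void example_erase_if_linked(struct rb_node *n, struct rb_root *tree)
{
	/* Skip nodes that were never inserted (or were already cleared). */
	if (!RB_EMPTY_NODE(n)) {
		rb_erase(n, tree);
		RB_CLEAR_NODE(n);	/* mark the node as "not in a tree" again */
	}
}
#endif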

extern void rb_insert_color(struct rb_node *, struct rb_root *);
extern void rb_erase(struct rb_node *, struct rb_root *);


/* Find logical next and previous nodes in a tree */
extern struct rb_node *rb_next(const struct rb_node *);
extern struct rb_node *rb_prev(const struct rb_node *);
extern struct rb_node *rb_first(const struct rb_root *);
extern struct rb_node *rb_last(const struct rb_root *);
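
/*
 * Illustrative sketch (not part of the original header, hence #if 0):
 * in-order traversal with rb_first()/rb_next() visits nodes in ascending
 * key order.  'tree' is assumed to be a populated struct rb_root.
 */
#if 0
{
	struct rb_node *n;

	for (n = rb_first(&tree); n; n = rb_next(n)) {
		/* use rb_entry(n, <type>, <member>) to recover the containing object */
	}
}
#endif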

/* Postorder iteration - always visit the parent after its children */
extern struct rb_node *rb_first_postorder(const struct rb_root *);
extern struct rb_node *rb_next_postorder(const struct rb_node *);

/* Fast replacement of a single node without remove/rebalance/add/rebalance */
extern void rb_replace_node(struct rb_node *victim, struct rb_node *new,
			    struct rb_root *root);

static inline void rb_link_node(struct rb_node *node, struct rb_node *parent,
				struct rb_node **rb_link)
{
	node->__rb_parent_color = (unsigned long)parent;
	node->rb_left = node->rb_right = NULL;

	*rb_link = node;
}
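
/*
 * Illustrative sketch (not part of the original header, hence #if 0) of the
 * "insert and search cores" mentioned at the top of this file, following the
 * pattern documented in Documentation/core-api/rbtree.rst.  'struct mytype',
 * my_search() and my_insert() are hypothetical names used only here.
 */
#if 0
struct mytype {
	struct rb_node node;
	long key;
};

static struct mytype *my_search(struct rb_root *root, long key)
{
	struct rb_node *n = root->rb_node;

	while (n) {
		struct mytype *this = rb_entry(n, struct mytype, node);

		if (key < this->key)
			n = n->rb_left;
		else if (key > this->key)
			n = n->rb_right;
		else
			return this;
	}
	return NULL;
}

static bool my_insert(struct rb_root *root, struct mytype *data)
{
	struct rb_node **new = &root->rb_node, *parent = NULL;

	/* Descend to the leaf position for data->key, remembering the parent. */
	while (*new) {
		struct mytype *this = rb_entry(*new, struct mytype, node);

		parent = *new;
		if (data->key < this->key)
			new = &(*new)->rb_left;
		else if (data->key > this->key)
			new = &(*new)->rb_right;
		else
			return false;	/* key already present */
	}

	/* Link the new node at the found position, then let the core rebalance. */
	rb_link_node(&data->node, parent, new);
	rb_insert_color(&data->node, root);
	return true;
}
#endif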

#define rb_entry_safe(ptr, type, member) \
	({ typeof(ptr) ____ptr = (ptr); \
	   ____ptr ? rb_entry(____ptr, type, member) : NULL; \
	})

/**
 * rbtree_postorder_for_each_entry_safe - iterate in post-order over rb_root of
 * given type allowing the backing memory of @pos to be invalidated
 *
 * @pos:	the 'type *' to use as a loop cursor.
 * @n:		another 'type *' to use as temporary storage
 * @root:	'rb_root *' of the rbtree.
 * @field:	the name of the rb_node field within 'type'.
 *
 * rbtree_postorder_for_each_entry_safe() provides a similar guarantee as
 * list_for_each_entry_safe() and allows the iteration to continue independent
 * of changes to @pos by the body of the loop.
 *
 * Note, however, that it cannot handle other modifications that re-order the
 * rbtree it is iterating over. This includes calling rb_erase() on @pos, as
 * rb_erase() may rebalance the tree, causing us to miss some nodes.
 */
#define rbtree_postorder_for_each_entry_safe(pos, n, root, field) \
	for (pos = rb_entry_safe(rb_first_postorder(root), typeof(*pos), field); \
	     pos && ({ n = rb_entry_safe(rb_next_postorder(&pos->field), \
			typeof(*pos), field); 1; }); \
	     pos = n)
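
/*
 * Illustrative sketch (not part of the original header, hence #if 0): the
 * classic use of the postorder iterator is tearing down an entire tree,
 * since every node is visited after its children and can be freed on the
 * spot.  Reuses the hypothetical 'struct mytype' and 'tree' (a populated
 * struct rb_root) from the sketches above; free() stands in for whatever
 * allocator the objects came from.
 */
#if 0
{
	struct mytype *pos, *n;

	rbtree_postorder_for_each_entry_safe(pos, n, &tree, node)
		free(pos);	/* children were already visited and freed */

	tree = RB_ROOT;		/* the old root no longer points at valid memory */
}
#endif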

static inline void rb_erase_init(struct rb_node *n, struct rb_root *root)
{
	rb_erase(n, root);
	RB_CLEAR_NODE(n);
}

/*
 * Leftmost-cached rbtrees.
 *
 * We do not cache the rightmost node: the extra footprint is not
 * justified by the number of potential users that could benefit
 * from O(1) rb_last(). Users that want this feature can always
 * implement the logic explicitly. Furthermore, users that want to
 * cache both pointers may find it a bit asymmetric, but that's ok.
 */
struct rb_root_cached {
	struct rb_root rb_root;
	struct rb_node *rb_leftmost;
};

#define RB_ROOT_CACHED (struct rb_root_cached) { {NULL, }, NULL }

/* Same as rb_first(), but O(1) */
#define rb_first_cached(root) (root)->rb_leftmost

static inline void rb_insert_color_cached(struct rb_node *node,
					  struct rb_root_cached *root,
					  bool leftmost)
{
	if (leftmost)
		root->rb_leftmost = node;
	rb_insert_color(node, &root->rb_root);
}
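
/*
 * Illustrative sketch (not part of the original header, hence #if 0): a
 * caller of rb_insert_color_cached() decides 'leftmost' during the descent;
 * it stays true only if every step went left, i.e. the new node becomes the
 * smallest element.  Reuses the hypothetical 'struct mytype' from above;
 * my_insert_cached() is likewise a hypothetical name.
 */
#if 0
static void my_insert_cached(struct rb_root_cached *root, struct mytype *data)
{
	struct rb_node **new = &root->rb_root.rb_node, *parent = NULL;
	bool leftmost = true;

	while (*new) {
		struct mytype *this = rb_entry(*new, struct mytype, node);

		parent = *new;
		if (data->key < this->key) {
			new = &(*new)->rb_left;
		} else {
			new = &(*new)->rb_right;
			leftmost = false;	/* went right at least once */
		}
	}

	rb_link_node(&data->node, parent, new);
	rb_insert_color_cached(&data->node, root, leftmost);
}
#endif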

static inline void rb_erase_cached(struct rb_node *node,
				   struct rb_root_cached *root)
{
	if (root->rb_leftmost == node)
		root->rb_leftmost = rb_next(node);
	rb_erase(node, &root->rb_root);
}

static inline void rb_replace_node_cached(struct rb_node *victim,
					  struct rb_node *new,
					  struct rb_root_cached *root)
{
	if (root->rb_leftmost == victim)
		root->rb_leftmost = new;
	rb_replace_node(victim, new, &root->rb_root);
}

#endif /* __TOOLS_LINUX_PERF_RBTREE_H */