Orange Pi5 kernel

Deprecated Linux kernel 5.10.110 for OrangePi 5/5B/5+ boards

3 Commits   0 Branches   0 Tags
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300    1) // SPDX-License-Identifier: GPL-2.0+
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300    2) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300    3)  * XArray implementation
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300    4)  * Copyright (c) 2017-2018 Microsoft Corporation
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300    5)  * Copyright (c) 2018-2020 Oracle
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300    6)  * Author: Matthew Wilcox <willy@infradead.org>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300    7)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300    8) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300    9) #include <linux/bitmap.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   10) #include <linux/export.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   11) #include <linux/list.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   12) #include <linux/slab.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   13) #include <linux/xarray.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   14) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   15) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   16)  * Coding conventions in this file:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   17)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   18)  * @xa is used to refer to the entire xarray.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   19)  * @xas is the 'xarray operation state'.  It may be either a pointer to
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   20)  * an xa_state, or an xa_state stored on the stack.  This is an unfortunate
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   21)  * ambiguity.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   22)  * @index is the index of the entry being operated on
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   23)  * @mark is an xa_mark_t; a small number indicating one of the mark bits.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   24)  * @node refers to an xa_node; usually the primary one being operated on by
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   25)  * this function.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   26)  * @offset is the index into the slots array inside an xa_node.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   27)  * @parent refers to the @xa_node closer to the head than @node.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   28)  * @entry refers to something stored in a slot in the xarray
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   29)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   30) 
/*
 * Which locking discipline does this array use?  The low two bits of
 * xa_flags encode it; the value is compared against XA_LOCK_IRQ and
 * XA_LOCK_BH by xas_lock_type()/xas_unlock_type(), with 0 meaning a
 * plain spinlock.
 */
static inline unsigned int xa_lock_type(const struct xarray *xa)
{
	return (__force unsigned int)xa->xa_flags & 3;
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   35) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   36) static inline void xas_lock_type(struct xa_state *xas, unsigned int lock_type)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   37) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   38) 	if (lock_type == XA_LOCK_IRQ)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   39) 		xas_lock_irq(xas);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   40) 	else if (lock_type == XA_LOCK_BH)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   41) 		xas_lock_bh(xas);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   42) 	else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   43) 		xas_lock(xas);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   44) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   45) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   46) static inline void xas_unlock_type(struct xa_state *xas, unsigned int lock_type)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   47) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   48) 	if (lock_type == XA_LOCK_IRQ)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   49) 		xas_unlock_irq(xas);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   50) 	else if (lock_type == XA_LOCK_BH)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   51) 		xas_unlock_bh(xas);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   52) 	else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   53) 		xas_unlock(xas);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   54) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   55) 
/*
 * Does this array track free slots?  When set, XA_FREE_MARK is maintained
 * on empty slots (see its use in xas_shrink() below).
 */
static inline bool xa_track_free(const struct xarray *xa)
{
	return xa->xa_flags & XA_FLAGS_TRACK_FREE;
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   60) 
/*
 * Should ZERO entries be treated as absent for shrinking purposes?
 * xas_shrink() converts a zero entry at the head back to NULL when set.
 */
static inline bool xa_zero_busy(const struct xarray *xa)
{
	return xa->xa_flags & XA_FLAGS_ZERO_BUSY;
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   65) 
/*
 * Record in the array-wide flags that at least one entry may have @mark
 * set.  The read-before-write avoids dirtying the flags word (and its
 * cacheline) when the bit is already up.
 */
static inline void xa_mark_set(struct xarray *xa, xa_mark_t mark)
{
	if (!(xa->xa_flags & XA_FLAGS_MARK(mark)))
		xa->xa_flags |= XA_FLAGS_MARK(mark);
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   71) 
/*
 * Clear the array-wide flag for @mark (no entry has it set any more).
 * Mirrors xa_mark_set(): only write the flags word if it would change.
 */
static inline void xa_mark_clear(struct xarray *xa, xa_mark_t mark)
{
	if (xa->xa_flags & XA_FLAGS_MARK(mark))
		xa->xa_flags &= ~(XA_FLAGS_MARK(mark));
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   77) 
/* The bitmap tracking @mark for every slot of @node (one bit per slot). */
static inline unsigned long *node_marks(struct xa_node *node, xa_mark_t mark)
{
	return node->marks[(__force unsigned)mark];
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   82) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   83) static inline bool node_get_mark(struct xa_node *node,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   84) 		unsigned int offset, xa_mark_t mark)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   85) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   86) 	return test_bit(offset, node_marks(node, mark));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   87) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   88) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   89) /* returns true if the bit was set */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   90) static inline bool node_set_mark(struct xa_node *node, unsigned int offset,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   91) 				xa_mark_t mark)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   92) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   93) 	return __test_and_set_bit(offset, node_marks(node, mark));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   94) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   95) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   96) /* returns true if the bit was set */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   97) static inline bool node_clear_mark(struct xa_node *node, unsigned int offset,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   98) 				xa_mark_t mark)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   99) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  100) 	return __test_and_clear_bit(offset, node_marks(node, mark));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  101) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  102) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  103) static inline bool node_any_mark(struct xa_node *node, xa_mark_t mark)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  104) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  105) 	return !bitmap_empty(node_marks(node, mark), XA_CHUNK_SIZE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  106) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  107) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  108) static inline void node_mark_all(struct xa_node *node, xa_mark_t mark)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  109) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  110) 	bitmap_fill(node_marks(node, mark), XA_CHUNK_SIZE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  111) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  112) 
/*
 * Advance an xa_mark_t to the next mark.  The __force casts strip and
 * reapply the __bitwise qualifier so ordinary arithmetic is legal.
 */
#define mark_inc(mark) do { \
	mark = (__force xa_mark_t)((__force unsigned)(mark) + 1); \
} while (0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  116) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  117) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  118)  * xas_squash_marks() - Merge all marks to the first entry
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  119)  * @xas: Array operation state.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  120)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  121)  * Set a mark on the first entry if any entry has it set.  Clear marks on
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  122)  * all sibling entries.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  123)  */
static void xas_squash_marks(const struct xa_state *xas)
{
	unsigned int mark = 0;
	/* One past the last sibling slot covered by this multi-index entry. */
	unsigned int limit = xas->xa_offset + xas->xa_sibs + 1;

	/* No sibling slots, nothing to merge. */
	if (!xas->xa_sibs)
		return;

	do {
		unsigned long *marks = xas->xa_node->marks[mark];
		/* No sibling has this mark set; leave the head slot alone. */
		if (find_next_bit(marks, limit, xas->xa_offset + 1) == limit)
			continue;
		/* Move the mark onto the head slot, then wipe the siblings. */
		__set_bit(xas->xa_offset, marks);
		bitmap_clear(marks, xas->xa_offset + 1, xas->xa_sibs);
	} while (mark++ != (__force unsigned)XA_MARK_MAX);
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  140) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  141) /* extracts the offset within this node from the index */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  142) static unsigned int get_offset(unsigned long index, struct xa_node *node)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  143) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  144) 	return (index >> node->shift) & XA_CHUNK_MASK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  145) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  146) 
/* Recompute xa_offset from xa_index for the node the walk is currently on. */
static void xas_set_offset(struct xa_state *xas)
{
	xas->xa_offset = get_offset(xas->xa_index, xas->xa_node);
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  151) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  152) /* move the index either forwards (find) or backwards (sibling slot) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  153) static void xas_move_index(struct xa_state *xas, unsigned long offset)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  154) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  155) 	unsigned int shift = xas->xa_node->shift;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  156) 	xas->xa_index &= ~XA_CHUNK_MASK << shift;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  157) 	xas->xa_index += offset << shift;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  158) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  159) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  160) static void xas_advance(struct xa_state *xas)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  161) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  162) 	xas->xa_offset++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  163) 	xas_move_index(xas, xas->xa_offset);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  164) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  165) 
/* Park the walk outside the array's current bounds and return no entry. */
static void *set_bounds(struct xa_state *xas)
{
	xas->xa_node = XAS_BOUNDS;
	return NULL;
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  171) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  172) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  173)  * Starts a walk.  If the @xas is already valid, we assume that it's on
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  174)  * the right path and just return where we've got to.  If we're in an
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  175)  * error state, return NULL.  If the index is outside the current scope
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  176)  * of the xarray, return NULL without changing @xas->xa_node.  Otherwise
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  177)  * set @xas->xa_node to NULL and return the current head of the array.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  178)  */
static void *xas_start(struct xa_state *xas)
{
	void *entry;

	/* Already mid-walk: just re-read the current slot. */
	if (xas_valid(xas))
		return xas_reload(xas);
	if (xas_error(xas))
		return NULL;

	entry = xa_head(xas->xa);
	if (!xa_is_node(entry)) {
		/* Single-entry array: only index 0 is within bounds. */
		if (xas->xa_index)
			return set_bounds(xas);
	} else {
		/* Index too large for the tree rooted at this head node? */
		if ((xas->xa_index >> xa_to_node(entry)->shift) > XA_CHUNK_MASK)
			return set_bounds(xas);
	}

	xas->xa_node = NULL;
	return entry;
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  200) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  201) static void *xas_descend(struct xa_state *xas, struct xa_node *node)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  202) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  203) 	unsigned int offset = get_offset(xas->xa_index, node);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  204) 	void *entry = xa_entry(xas->xa, node, offset);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  205) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  206) 	xas->xa_node = node;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  207) 	if (xa_is_sibling(entry)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  208) 		offset = xa_to_sibling(entry);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  209) 		entry = xa_entry(xas->xa, node, offset);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  210) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  211) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  212) 	xas->xa_offset = offset;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  213) 	return entry;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  214) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  215) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  216) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  217)  * xas_load() - Load an entry from the XArray (advanced).
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  218)  * @xas: XArray operation state.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  219)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  220)  * Usually walks the @xas to the appropriate state to load the entry
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  221)  * stored at xa_index.  However, it will do nothing and return %NULL if
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  222)  * @xas is in an error state.  xas_load() will never expand the tree.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  223)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  224)  * If the xa_state is set up to operate on a multi-index entry, xas_load()
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  225)  * may return %NULL or an internal entry, even if there are entries
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  226)  * present within the range specified by @xas.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  227)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  228)  * Context: Any context.  The caller should hold the xa_lock or the RCU lock.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  229)  * Return: Usually an entry in the XArray, but see description for exceptions.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  230)  */
void *xas_load(struct xa_state *xas)
{
	void *entry = xas_start(xas);

	/* Walk down the tree until we reach a non-node entry or a leaf. */
	while (xa_is_node(entry)) {
		struct xa_node *node = xa_to_node(entry);

		/* Don't descend below the order this xa_state operates on. */
		if (xas->xa_shift > node->shift)
			break;
		entry = xas_descend(xas, node);
		/* shift == 0 is the bottom level: whatever we found is final. */
		if (node->shift == 0)
			break;
	}
	return entry;
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  246) EXPORT_SYMBOL_GPL(xas_load);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  247) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  248) /* Move the radix tree node cache here */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  249) extern struct kmem_cache *radix_tree_node_cachep;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  250) extern void radix_tree_node_rcu_free(struct rcu_head *head);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  251) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  252) #define XA_RCU_FREE	((struct xarray *)1)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  253) 
/*
 * Return @node to the node cache after the RCU grace period.  ->array is
 * set to the XA_RCU_FREE sentinel first so concurrent readers can observe
 * that the node no longer belongs to any array.
 */
static void xa_node_free(struct xa_node *node)
{
	XA_NODE_BUG_ON(node, !list_empty(&node->private_list));
	node->array = XA_RCU_FREE;
	call_rcu(&node->rcu_head, radix_tree_node_rcu_free);
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  260) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  261) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  262)  * xas_destroy() - Free any resources allocated during the XArray operation.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  263)  * @xas: XArray operation state.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  264)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  265)  * This function is now internal-only.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  266)  */
static void xas_destroy(struct xa_state *xas)
{
	struct xa_node *next, *node = xas->xa_alloc;

	/* Preallocated-but-unused nodes are chained via their parent pointer. */
	while (node) {
		XA_NODE_BUG_ON(node, !list_empty(&node->private_list));
		next = rcu_dereference_raw(node->parent);
		radix_tree_node_rcu_free(&node->rcu_head);
		xas->xa_alloc = node = next;
	}
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  278) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  279) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  280)  * xas_nomem() - Allocate memory if needed.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  281)  * @xas: XArray operation state.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  282)  * @gfp: Memory allocation flags.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  283)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  284)  * If we need to add new nodes to the XArray, we try to allocate memory
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  285)  * with GFP_NOWAIT while holding the lock, which will usually succeed.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  286)  * If it fails, @xas is flagged as needing memory to continue.  The caller
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  287)  * should drop the lock and call xas_nomem().  If xas_nomem() succeeds,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  288)  * the caller should retry the operation.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  289)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  290)  * Forward progress is guaranteed as one node is allocated here and
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  291)  * stored in the xa_state where it will be found by xas_alloc().  More
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  292)  * nodes will likely be found in the slab allocator, but we do not tie
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  293)  * them up here.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  294)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  295)  * Return: true if memory was needed, and was successfully allocated.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  296)  */
bool xas_nomem(struct xa_state *xas, gfp_t gfp)
{
	/* Only -ENOMEM is retryable; otherwise drop any preallocation. */
	if (xas->xa_node != XA_ERROR(-ENOMEM)) {
		xas_destroy(xas);
		return false;
	}
	if (xas->xa->xa_flags & XA_FLAGS_ACCOUNT)
		gfp |= __GFP_ACCOUNT;	/* charge node memory like the array's */
	xas->xa_alloc = kmem_cache_alloc(radix_tree_node_cachep, gfp);
	if (!xas->xa_alloc)
		return false;
	xas->xa_alloc->parent = NULL;
	XA_NODE_BUG_ON(xas->xa_alloc, !list_empty(&xas->xa_alloc->private_list));
	/* Clear the error so the caller's retry walks from the top. */
	xas->xa_node = XAS_RESTART;
	return true;
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  313) EXPORT_SYMBOL_GPL(xas_nomem);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  314) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  315) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  316)  * __xas_nomem() - Drop locks and allocate memory if needed.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  317)  * @xas: XArray operation state.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  318)  * @gfp: Memory allocation flags.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  319)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  320)  * Internal variant of xas_nomem().
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  321)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  322)  * Return: true if memory was needed, and was successfully allocated.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  323)  */
static bool __xas_nomem(struct xa_state *xas, gfp_t gfp)
	__must_hold(xas->xa->xa_lock)
{
	unsigned int lock_type = xa_lock_type(xas->xa);

	/* Only -ENOMEM is retryable; otherwise drop any preallocation. */
	if (xas->xa_node != XA_ERROR(-ENOMEM)) {
		xas_destroy(xas);
		return false;
	}
	if (xas->xa->xa_flags & XA_FLAGS_ACCOUNT)
		gfp |= __GFP_ACCOUNT;	/* charge node memory like the array's */
	if (gfpflags_allow_blocking(gfp)) {
		/*
		 * A sleeping allocation may not run under the xa_lock; drop
		 * and retake it (in the same mode) around the allocation.
		 */
		xas_unlock_type(xas, lock_type);
		xas->xa_alloc = kmem_cache_alloc(radix_tree_node_cachep, gfp);
		xas_lock_type(xas, lock_type);
	} else {
		xas->xa_alloc = kmem_cache_alloc(radix_tree_node_cachep, gfp);
	}
	if (!xas->xa_alloc)
		return false;
	xas->xa_alloc->parent = NULL;
	XA_NODE_BUG_ON(xas->xa_alloc, !list_empty(&xas->xa_alloc->private_list));
	/* Clear the error so the caller's retry walks from the top. */
	xas->xa_node = XAS_RESTART;
	return true;
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  349) 
/*
 * Tell the caller-supplied update callback (if any) that @node changed;
 * without one, sanity-check that nobody left the node on a private list.
 */
static void xas_update(struct xa_state *xas, struct xa_node *node)
{
	if (xas->xa_update)
		xas->xa_update(node);
	else
		XA_NODE_BUG_ON(node, !list_empty(&node->private_list));
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  357) 
/*
 * Produce an initialised node for level @shift, parented at the walk's
 * current position.  Prefers the node preallocated by xas_nomem(); falls
 * back to a non-blocking allocation, setting -ENOMEM on the xa_state on
 * failure.  Returns NULL if the xa_state is invalid or allocation fails.
 */
static void *xas_alloc(struct xa_state *xas, unsigned int shift)
{
	struct xa_node *parent = xas->xa_node;
	struct xa_node *node = xas->xa_alloc;

	if (xas_invalid(xas))
		return NULL;

	if (node) {
		/* Consume the preallocated node. */
		xas->xa_alloc = NULL;
	} else {
		/* Under the lock, so we may not sleep. */
		gfp_t gfp = GFP_NOWAIT | __GFP_NOWARN;

		if (xas->xa->xa_flags & XA_FLAGS_ACCOUNT)
			gfp |= __GFP_ACCOUNT;

		node = kmem_cache_alloc(radix_tree_node_cachep, gfp);
		if (!node) {
			xas_set_err(xas, -ENOMEM);
			return NULL;
		}
	}

	if (parent) {
		/* Hook the new child into its parent's accounting. */
		node->offset = xas->xa_offset;
		parent->count++;
		XA_NODE_BUG_ON(node, parent->count > XA_CHUNK_SIZE);
		xas_update(xas, parent);
	}
	XA_NODE_BUG_ON(node, shift > BITS_PER_LONG);
	XA_NODE_BUG_ON(node, !list_empty(&node->private_list));
	node->shift = shift;
	node->count = 0;
	node->nr_values = 0;
	RCU_INIT_POINTER(node->parent, xas->xa_node);
	node->array = xas->xa;

	return node;
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  397) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  398) #ifdef CONFIG_XARRAY_MULTI
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  399) /* Returns the number of indices covered by a given xa_state */
static unsigned long xas_size(const struct xa_state *xas)
{
	/* The canonical entry plus xa_sibs siblings, each 1 << shift wide. */
	return (xas->xa_sibs + 1UL) << xas->xa_shift;
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  404) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  405) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  406) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  407)  * Use this to calculate the maximum index that will need to be created
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  408)  * in order to add the entry described by @xas.  Because we cannot store a
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  409)  * multi-index entry at index 0, the calculation is a little more complex
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  410)  * than you might expect.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  411)  */
static unsigned long xas_max(struct xa_state *xas)
{
	unsigned long max = xas->xa_index;

#ifdef CONFIG_XARRAY_MULTI
	if (xas->xa_shift || xas->xa_sibs) {
		/* Round up to the last index covered by this entry. */
		unsigned long mask = xas_size(xas) - 1;
		max |= mask;
		/*
		 * A multi-index entry cannot live at index 0; force one more
		 * level so it starts at a non-zero index (see comment above).
		 */
		if (mask == max)
			max++;
	}
#endif

	return max;
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  427) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  428) /* The maximum index that can be contained in the array without expanding it */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  429) static unsigned long max_index(void *entry)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  430) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  431) 	if (!xa_is_node(entry))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  432) 		return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  433) 	return (XA_CHUNK_SIZE << xa_to_node(entry)->shift) - 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  434) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  435) 
/*
 * xas_shrink() - Reduce the height of the tree.
 *
 * While the head node has only its slot 0 occupied, move that entry up
 * to become the new head and free the old node.  Stops when a node has
 * more than one occupied slot, or when the sole entry is a non-node
 * entry that spans multiple indices.
 */
static void xas_shrink(struct xa_state *xas)
{
	struct xarray *xa = xas->xa;
	struct xa_node *node = xas->xa_node;

	for (;;) {
		void *entry;

		XA_NODE_BUG_ON(node, node->count > XA_CHUNK_SIZE);
		/* Shrinking is only possible while slot 0 is the sole user. */
		if (node->count != 1)
			break;
		entry = xa_entry_locked(xa, node, 0);
		if (!entry)
			break;
		/*
		 * A non-node entry in a node with non-zero shift covers
		 * multiple indices and cannot be moved to the head slot.
		 */
		if (!xa_is_node(entry) && node->shift)
			break;
		/* For XA_FLAGS_ZERO_BUSY arrays, a ZERO entry counts as empty. */
		if (xa_is_zero(entry) && xa_zero_busy(xa))
			entry = NULL;
		/* The operation state no longer refers to a valid node. */
		xas->xa_node = XAS_BOUNDS;

		RCU_INIT_POINTER(xa->xa_head, entry);
		/* Without a node there is nowhere to keep per-slot free marks. */
		if (xa_track_free(xa) && !node_get_mark(node, 0, XA_FREE_MARK))
			xa_mark_clear(xa, XA_FREE_MARK);

		node->count = 0;
		node->nr_values = 0;
		/*
		 * Concurrent RCU readers may still hold a reference to this
		 * node; leave a retry marker so they restart from the head.
		 */
		if (!xa_is_node(entry))
			RCU_INIT_POINTER(node->slots[0], XA_RETRY_ENTRY);
		xas_update(xas, node);
		xa_node_free(node);
		if (!xa_is_node(entry))
			break;
		/* The child is now the head; see if it can be shrunk too. */
		node = xa_to_node(entry);
		node->parent = NULL;
	}
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  472) 
/*
 * xas_delete_node() - Attempt to delete an xa_node
 * @xas: Array operation state.
 *
 * Attempts to delete the @xas->xa_node.  This will fail if the node
 * still has any occupied slots (a non-zero count).  Ancestors which
 * become empty are deleted as well, and the tree is shrunk if the
 * deletion reaches the head.
 */
static void xas_delete_node(struct xa_state *xas)
{
	struct xa_node *node = xas->xa_node;

	for (;;) {
		struct xa_node *parent;

		XA_NODE_BUG_ON(node, node->count > XA_CHUNK_SIZE);
		/* A node with entries still in it cannot be deleted. */
		if (node->count)
			break;

		parent = xa_parent_locked(xas->xa, node);
		xas->xa_node = parent;
		xas->xa_offset = node->offset;
		xa_node_free(node);

		if (!parent) {
			/* We deleted the head node; the array is now empty. */
			xas->xa->xa_head = NULL;
			xas->xa_node = XAS_BOUNDS;
			return;
		}

		/* Detach the freed node from its parent and walk upwards. */
		parent->slots[xas->xa_offset] = NULL;
		parent->count--;
		XA_NODE_BUG_ON(parent, parent->count > XA_CHUNK_SIZE);
		node = parent;
		xas_update(xas, node);
	}

	/* The surviving node is the head; see if the tree can be lowered. */
	if (!node->parent)
		xas_shrink(xas);
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  512) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  513) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  514)  * xas_free_nodes() - Free this node and all nodes that it references
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  515)  * @xas: Array operation state.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  516)  * @top: Node to free
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  517)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  518)  * This node has been removed from the tree.  We must now free it and all
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  519)  * of its subnodes.  There may be RCU walkers with references into the tree,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  520)  * so we must replace all entries with retry markers.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  521)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  522) static void xas_free_nodes(struct xa_state *xas, struct xa_node *top)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  523) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  524) 	unsigned int offset = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  525) 	struct xa_node *node = top;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  526) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  527) 	for (;;) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  528) 		void *entry = xa_entry_locked(xas->xa, node, offset);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  529) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  530) 		if (node->shift && xa_is_node(entry)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  531) 			node = xa_to_node(entry);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  532) 			offset = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  533) 			continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  534) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  535) 		if (entry)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  536) 			RCU_INIT_POINTER(node->slots[offset], XA_RETRY_ENTRY);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  537) 		offset++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  538) 		while (offset == XA_CHUNK_SIZE) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  539) 			struct xa_node *parent;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  540) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  541) 			parent = xa_parent_locked(xas->xa, node);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  542) 			offset = node->offset + 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  543) 			node->count = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  544) 			node->nr_values = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  545) 			xas_update(xas, node);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  546) 			xa_node_free(node);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  547) 			if (node == top)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  548) 				return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  549) 			node = parent;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  550) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  551) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  552) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  553) 
/*
 * xas_expand adds nodes to the head of the tree until it has reached
 * sufficient height to be able to contain @xas->xa_index
 *
 * Return: the shift (in bits) covered by the head after expansion, or
 * -ENOMEM if a node allocation failed.
 */
static int xas_expand(struct xa_state *xas, void *head)
{
	struct xarray *xa = xas->xa;
	struct xa_node *node = NULL;
	unsigned int shift = 0;
	unsigned long max = xas_max(xas);

	if (!head) {
		/* Empty array: no node needed yet, just report the shift. */
		if (max == 0)
			return 0;
		while ((max >> shift) >= XA_CHUNK_SIZE)
			shift += XA_CHUNK_SHIFT;
		return shift + XA_CHUNK_SHIFT;
	} else if (xa_is_node(head)) {
		node = xa_to_node(head);
		shift = node->shift + XA_CHUNK_SHIFT;
	}
	xas->xa_node = NULL;

	/* Add one level of height per iteration until @max fits. */
	while (max > max_index(head)) {
		xa_mark_t mark = 0;

		XA_NODE_BUG_ON(node, shift > BITS_PER_LONG);
		node = xas_alloc(xas, shift);
		if (!node)
			return -ENOMEM;

		/* The old head becomes slot 0 of the new head node. */
		node->count = 1;
		if (xa_is_value(head))
			node->nr_values = 1;
		RCU_INIT_POINTER(node->slots[0], head);

		/* Propagate the aggregated mark info to the new child */
		for (;;) {
			if (xa_track_free(xa) && mark == XA_FREE_MARK) {
				/* Slot 0 is occupied; all other slots are free. */
				node_mark_all(node, XA_FREE_MARK);
				if (!xa_marked(xa, XA_FREE_MARK)) {
					node_clear_mark(node, 0, XA_FREE_MARK);
					xa_mark_set(xa, XA_FREE_MARK);
				}
			} else if (xa_marked(xa, mark)) {
				node_set_mark(node, 0, mark);
			}
			if (mark == XA_MARK_MAX)
				break;
			mark_inc(mark);
		}

		/*
		 * Now that the new node is fully initialised, we can add
		 * it to the tree
		 */
		if (xa_is_node(head)) {
			xa_to_node(head)->offset = 0;
			rcu_assign_pointer(xa_to_node(head)->parent, node);
		}
		head = xa_mk_node(node);
		rcu_assign_pointer(xa->xa_head, head);
		xas_update(xas, node);

		shift += XA_CHUNK_SHIFT;
	}

	xas->xa_node = node;
	return shift;
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  624) 
/*
 * xas_create() - Create a slot to store an entry in.
 * @xas: XArray operation state.
 * @allow_root: %true if we can store the entry in the root directly
 *
 * Most users will not need to call this function directly, as it is called
 * by xas_store().  It is useful for doing conditional store operations
 * (see the xa_cmpxchg() implementation for an example).
 *
 * Return: If the slot already existed, returns the contents of this slot.
 * If the slot was newly created, returns %NULL.  If it failed to create the
 * slot, returns %NULL and indicates the error in @xas.
 */
static void *xas_create(struct xa_state *xas, bool allow_root)
{
	struct xarray *xa = xas->xa;
	void *entry;
	void __rcu **slot;
	struct xa_node *node = xas->xa_node;
	int shift;
	unsigned int order = xas->xa_shift;

	if (xas_top(node)) {
		/* Starting from the head: grow the tree if necessary. */
		entry = xa_head_locked(xa);
		xas->xa_node = NULL;
		if (!entry && xa_zero_busy(xa))
			entry = XA_ZERO_ENTRY;
		shift = xas_expand(xas, entry);
		if (shift < 0)
			return NULL;
		/* Force a node allocation when the root may not be used. */
		if (!shift && !allow_root)
			shift = XA_CHUNK_SHIFT;
		entry = xa_head_locked(xa);
		slot = &xa->xa_head;
	} else if (xas_error(xas)) {
		return NULL;
	} else if (node) {
		/* Resume the descent from where the state already points. */
		unsigned int offset = xas->xa_offset;

		shift = node->shift;
		entry = xa_entry_locked(xa, node, offset);
		slot = &node->slots[offset];
	} else {
		shift = 0;
		entry = xa_head_locked(xa);
		slot = &xa->xa_head;
	}

	/* Descend, allocating interior nodes, until we reach @order. */
	while (shift > order) {
		shift -= XA_CHUNK_SHIFT;
		if (!entry) {
			node = xas_alloc(xas, shift);
			if (!node)
				break;
			/* New nodes start entirely free for allocating arrays. */
			if (xa_track_free(xa))
				node_mark_all(node, XA_FREE_MARK);
			rcu_assign_pointer(*slot, xa_mk_node(node));
		} else if (xa_is_node(entry)) {
			node = xa_to_node(entry);
		} else {
			/* Hit an existing entry covering this range. */
			break;
		}
		entry = xas_descend(xas, node);
		slot = &node->slots[xas->xa_offset];
	}

	return entry;
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  693) 
/**
 * xas_create_range() - Ensure that stores to this range will succeed
 * @xas: XArray operation state.
 *
 * Creates all of the slots in the range covered by @xas.  Sets @xas to
 * create single-index entries and positions it at the beginning of the
 * range.  This is for the benefit of users which have not yet been
 * converted to use multi-index entries.
 */
void xas_create_range(struct xa_state *xas)
{
	/* Remember the caller's state so it can be restored afterwards. */
	unsigned long index = xas->xa_index;
	unsigned char shift = xas->xa_shift;
	unsigned char sibs = xas->xa_sibs;

	/* Start at the last index of the range and work backwards. */
	xas->xa_index |= ((sibs + 1UL) << shift) - 1;
	if (xas_is_node(xas) && xas->xa_node->shift == xas->xa_shift)
		xas->xa_offset |= sibs;
	/* Create single-index entries regardless of the requested order. */
	xas->xa_shift = 0;
	xas->xa_sibs = 0;

	for (;;) {
		xas_create(xas, true);
		if (xas_error(xas))
			goto restore;
		/* Done once we are back within the first chunk of the range. */
		if (xas->xa_index <= (index | XA_CHUNK_MASK))
			goto success;
		xas->xa_index -= XA_CHUNK_SIZE;

		/* Walk up the tree to find the node preceding this one. */
		for (;;) {
			struct xa_node *node = xas->xa_node;
			if (node->shift >= shift)
				break;
			xas->xa_node = xa_parent_locked(xas->xa, node);
			xas->xa_offset = node->offset - 1;
			if (node->offset != 0)
				break;
		}
	}

restore:
	xas->xa_shift = shift;
	xas->xa_sibs = sibs;
	xas->xa_index = index;
	return;
success:
	xas->xa_index = index;
	if (xas->xa_node)
		xas_set_offset(xas);
}
EXPORT_SYMBOL_GPL(xas_create_range);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  745) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  746) static void update_node(struct xa_state *xas, struct xa_node *node,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  747) 		int count, int values)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  748) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  749) 	if (!node || (!count && !values))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  750) 		return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  751) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  752) 	node->count += count;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  753) 	node->nr_values += values;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  754) 	XA_NODE_BUG_ON(node, node->count > XA_CHUNK_SIZE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  755) 	XA_NODE_BUG_ON(node, node->nr_values > XA_CHUNK_SIZE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  756) 	xas_update(xas, node);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  757) 	if (count < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  758) 		xas_delete_node(xas);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  759) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  760) 
/**
 * xas_store() - Store this entry in the XArray.
 * @xas: XArray operation state.
 * @entry: New entry.
 *
 * If @xas is operating on a multi-index entry, the entry returned by this
 * function is essentially meaningless (it may be an internal entry or it
 * may be %NULL, even if there are non-NULL entries at some of the indices
 * covered by the range).  This is not a problem for any current users,
 * and can be changed if needed.
 *
 * Return: The old entry at this index.
 */
void *xas_store(struct xa_state *xas, void *entry)
{
	struct xa_node *node;
	void __rcu **slot = &xas->xa->xa_head;
	unsigned int offset, max;
	int count = 0;		/* net change in occupied slots */
	int values = 0;		/* net change in value entries */
	void *first, *next;
	bool value = xa_is_value(entry);

	if (entry) {
		/* Node and ZERO entries may not be stored at the root. */
		bool allow_root = !xa_is_node(entry) && !xa_is_zero(entry);
		first = xas_create(xas, allow_root);
	} else {
		/* Storing NULL never needs to allocate. */
		first = xas_load(xas);
	}

	if (xas_invalid(xas))
		return first;
	node = xas->xa_node;
	if (node && (xas->xa_shift < node->shift))
		xas->xa_sibs = 0;
	/* Nothing to do if the entry is unchanged and not multi-index. */
	if ((first == entry) && !xas->xa_sibs)
		return first;

	next = first;
	offset = xas->xa_offset;
	max = xas->xa_offset + xas->xa_sibs;
	if (node) {
		slot = &node->slots[offset];
		if (xas->xa_sibs)
			xas_squash_marks(xas);
	}
	if (!entry)
		xas_init_marks(xas);

	/* Store into each slot covered by the (possibly multi-index) range. */
	for (;;) {
		/*
		 * Must clear the marks before setting the entry to NULL,
		 * otherwise xas_for_each_marked may find a NULL entry and
		 * stop early.  rcu_assign_pointer contains a release barrier
		 * so the mark clearing will appear to happen before the
		 * entry is set to NULL.
		 */
		rcu_assign_pointer(*slot, entry);
		/* An overwritten subtree must be torn down for RCU readers. */
		if (xa_is_node(next) && (!node || node->shift))
			xas_free_nodes(xas, xa_to_node(next));
		if (!node)
			break;
		count += !next - !entry;
		values += !xa_is_value(first) - !value;
		if (entry) {
			if (offset == max)
				break;
			/* Fill the remaining slots with sibling entries. */
			if (!xa_is_sibling(entry))
				entry = xa_mk_sibling(xas->xa_offset);
		} else {
			if (offset == XA_CHUNK_MASK)
				break;
		}
		next = xa_entry_locked(xas->xa, node, ++offset);
		if (!xa_is_sibling(next)) {
			/* When erasing, stop after the old entry's last slot. */
			if (!entry && (offset > max))
				break;
			first = next;
		}
		slot++;
	}

	update_node(xas, node, count, values);
	return first;
}
EXPORT_SYMBOL_GPL(xas_store);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  847) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  848) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  849)  * xas_get_mark() - Returns the state of this mark.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  850)  * @xas: XArray operation state.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  851)  * @mark: Mark number.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  852)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  853)  * Return: true if the mark is set, false if the mark is clear or @xas
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  854)  * is in an error state.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  855)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  856) bool xas_get_mark(const struct xa_state *xas, xa_mark_t mark)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  857) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  858) 	if (xas_invalid(xas))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  859) 		return false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  860) 	if (!xas->xa_node)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  861) 		return xa_marked(xas->xa, mark);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  862) 	return node_get_mark(xas->xa_node, xas->xa_offset, mark);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  863) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  864) EXPORT_SYMBOL_GPL(xas_get_mark);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  865) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  866) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  867)  * xas_set_mark() - Sets the mark on this entry and its parents.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  868)  * @xas: XArray operation state.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  869)  * @mark: Mark number.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  870)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  871)  * Sets the specified mark on this entry, and walks up the tree setting it
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  872)  * on all the ancestor entries.  Does nothing if @xas has not been walked to
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  873)  * an entry, or is in an error state.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  874)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  875) void xas_set_mark(const struct xa_state *xas, xa_mark_t mark)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  876) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  877) 	struct xa_node *node = xas->xa_node;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  878) 	unsigned int offset = xas->xa_offset;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  879) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  880) 	if (xas_invalid(xas))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  881) 		return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  882) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  883) 	while (node) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  884) 		if (node_set_mark(node, offset, mark))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  885) 			return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  886) 		offset = node->offset;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  887) 		node = xa_parent_locked(xas->xa, node);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  888) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  889) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  890) 	if (!xa_marked(xas->xa, mark))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  891) 		xa_mark_set(xas->xa, mark);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  892) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  893) EXPORT_SYMBOL_GPL(xas_set_mark);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  894) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  895) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  896)  * xas_clear_mark() - Clears the mark on this entry and its parents.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  897)  * @xas: XArray operation state.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  898)  * @mark: Mark number.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  899)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  900)  * Clears the specified mark on this entry, and walks back to the head
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  901)  * attempting to clear it on all the ancestor entries.  Does nothing if
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  902)  * @xas has not been walked to an entry, or is in an error state.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  903)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  904) void xas_clear_mark(const struct xa_state *xas, xa_mark_t mark)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  905) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  906) 	struct xa_node *node = xas->xa_node;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  907) 	unsigned int offset = xas->xa_offset;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  908) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  909) 	if (xas_invalid(xas))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  910) 		return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  911) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  912) 	while (node) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  913) 		if (!node_clear_mark(node, offset, mark))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  914) 			return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  915) 		if (node_any_mark(node, mark))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  916) 			return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  917) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  918) 		offset = node->offset;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  919) 		node = xa_parent_locked(xas->xa, node);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  920) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  921) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  922) 	if (xa_marked(xas->xa, mark))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  923) 		xa_mark_clear(xas->xa, mark);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  924) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  925) EXPORT_SYMBOL_GPL(xas_clear_mark);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  926) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  927) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  928)  * xas_init_marks() - Initialise all marks for the entry
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  929)  * @xas: Array operations state.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  930)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  931)  * Initialise all marks for the entry specified by @xas.  If we're tracking
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  932)  * free entries with a mark, we need to set it on all entries.  All other
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  933)  * marks are cleared.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  934)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  935)  * This implementation is not as efficient as it could be; we may walk
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  936)  * up the tree multiple times.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  937)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  938) void xas_init_marks(const struct xa_state *xas)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  939) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  940) 	xa_mark_t mark = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  941) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  942) 	for (;;) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  943) 		if (xa_track_free(xas->xa) && mark == XA_FREE_MARK)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  944) 			xas_set_mark(xas, mark);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  945) 		else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  946) 			xas_clear_mark(xas, mark);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  947) 		if (mark == XA_MARK_MAX)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  948) 			break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  949) 		mark_inc(mark);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  950) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  951) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  952) EXPORT_SYMBOL_GPL(xas_init_marks);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  953) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  954) #ifdef CONFIG_XARRAY_MULTI
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  955) static unsigned int node_get_marks(struct xa_node *node, unsigned int offset)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  956) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  957) 	unsigned int marks = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  958) 	xa_mark_t mark = XA_MARK_0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  959) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  960) 	for (;;) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  961) 		if (node_get_mark(node, offset, mark))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  962) 			marks |= 1 << (__force unsigned int)mark;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  963) 		if (mark == XA_MARK_MAX)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  964) 			break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  965) 		mark_inc(mark);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  966) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  967) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  968) 	return marks;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  969) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  970) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  971) static void node_set_marks(struct xa_node *node, unsigned int offset,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  972) 			struct xa_node *child, unsigned int marks)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  973) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  974) 	xa_mark_t mark = XA_MARK_0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  975) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  976) 	for (;;) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  977) 		if (marks & (1 << (__force unsigned int)mark)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  978) 			node_set_mark(node, offset, mark);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  979) 			if (child)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  980) 				node_mark_all(child, mark);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  981) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  982) 		if (mark == XA_MARK_MAX)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  983) 			break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  984) 		mark_inc(mark);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  985) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  986) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  987) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  988) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  989)  * xas_split_alloc() - Allocate memory for splitting an entry.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  990)  * @xas: XArray operation state.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  991)  * @entry: New entry which will be stored in the array.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  992)  * @order: New entry order.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  993)  * @gfp: Memory allocation flags.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  994)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  995)  * This function should be called before calling xas_split().
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  996)  * If necessary, it will allocate new nodes (and fill them with @entry)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  997)  * to prepare for the upcoming split of an entry of @order size into
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  998)  * entries of the order stored in the @xas.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  999)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1000)  * Context: May sleep if @gfp flags permit.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1001)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1002) void xas_split_alloc(struct xa_state *xas, void *entry, unsigned int order,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1003) 		gfp_t gfp)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1004) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1005) 	unsigned int sibs = (1 << (order % XA_CHUNK_SHIFT)) - 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1006) 	unsigned int mask = xas->xa_sibs;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1007) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1008) 	/* XXX: no support for splitting really large entries yet */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1009) 	if (WARN_ON(xas->xa_shift + 2 * XA_CHUNK_SHIFT < order))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1010) 		goto nomem;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1011) 	if (xas->xa_shift + XA_CHUNK_SHIFT > order)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1012) 		return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1013) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1014) 	do {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1015) 		unsigned int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1016) 		void *sibling = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1017) 		struct xa_node *node;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1018) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1019) 		node = kmem_cache_alloc(radix_tree_node_cachep, gfp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1020) 		if (!node)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1021) 			goto nomem;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1022) 		node->array = xas->xa;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1023) 		for (i = 0; i < XA_CHUNK_SIZE; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1024) 			if ((i & mask) == 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1025) 				RCU_INIT_POINTER(node->slots[i], entry);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1026) 				sibling = xa_mk_sibling(i);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1027) 			} else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1028) 				RCU_INIT_POINTER(node->slots[i], sibling);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1029) 			}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1030) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1031) 		RCU_INIT_POINTER(node->parent, xas->xa_alloc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1032) 		xas->xa_alloc = node;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1033) 	} while (sibs-- > 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1034) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1035) 	return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1036) nomem:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1037) 	xas_destroy(xas);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1038) 	xas_set_err(xas, -ENOMEM);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1039) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1040) EXPORT_SYMBOL_GPL(xas_split_alloc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1041) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1042) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1043)  * xas_split() - Split a multi-index entry into smaller entries.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1044)  * @xas: XArray operation state.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1045)  * @entry: New entry to store in the array.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1046)  * @order: New entry order.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1047)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1048)  * The value in the entry is copied to all the replacement entries.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1049)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1050)  * Context: Any context.  The caller should hold the xa_lock.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1051)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1052) void xas_split(struct xa_state *xas, void *entry, unsigned int order)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1053) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1054) 	unsigned int sibs = (1 << (order % XA_CHUNK_SHIFT)) - 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1055) 	unsigned int offset, marks;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1056) 	struct xa_node *node;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1057) 	void *curr = xas_load(xas);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1058) 	int values = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1059) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1060) 	node = xas->xa_node;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1061) 	if (xas_top(node))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1062) 		return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1063) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1064) 	marks = node_get_marks(node, xas->xa_offset);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1065) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1066) 	offset = xas->xa_offset + sibs;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1067) 	do {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1068) 		if (xas->xa_shift < node->shift) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1069) 			struct xa_node *child = xas->xa_alloc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1070) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1071) 			xas->xa_alloc = rcu_dereference_raw(child->parent);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1072) 			child->shift = node->shift - XA_CHUNK_SHIFT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1073) 			child->offset = offset;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1074) 			child->count = XA_CHUNK_SIZE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1075) 			child->nr_values = xa_is_value(entry) ?
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1076) 					XA_CHUNK_SIZE : 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1077) 			RCU_INIT_POINTER(child->parent, node);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1078) 			node_set_marks(node, offset, child, marks);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1079) 			rcu_assign_pointer(node->slots[offset],
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1080) 					xa_mk_node(child));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1081) 			if (xa_is_value(curr))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1082) 				values--;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1083) 			xas_update(xas, child);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1084) 		} else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1085) 			unsigned int canon = offset - xas->xa_sibs;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1086) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1087) 			node_set_marks(node, canon, NULL, marks);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1088) 			rcu_assign_pointer(node->slots[canon], entry);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1089) 			while (offset > canon)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1090) 				rcu_assign_pointer(node->slots[offset--],
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1091) 						xa_mk_sibling(canon));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1092) 			values += (xa_is_value(entry) - xa_is_value(curr)) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1093) 					(xas->xa_sibs + 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1094) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1095) 	} while (offset-- > xas->xa_offset);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1096) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1097) 	node->nr_values += values;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1098) 	xas_update(xas, node);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1099) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1100) EXPORT_SYMBOL_GPL(xas_split);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1101) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1102) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1103) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1104)  * xas_pause() - Pause a walk to drop a lock.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1105)  * @xas: XArray operation state.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1106)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1107)  * Some users need to pause a walk and drop the lock they're holding in
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1108)  * order to yield to a higher priority thread or carry out an operation
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1109)  * on an entry.  Those users should call this function before they drop
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1110)  * the lock.  It resets the @xas to be suitable for the next iteration
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1111)  * of the loop after the user has reacquired the lock.  If most entries
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1112)  * found during a walk require you to call xas_pause(), the xa_for_each()
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1113)  * iterator may be more appropriate.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1114)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1115)  * Note that xas_pause() only works for forward iteration.  If a user needs
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1116)  * to pause a reverse iteration, we will need a xas_pause_rev().
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1117)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1118) void xas_pause(struct xa_state *xas)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1119) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1120) 	struct xa_node *node = xas->xa_node;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1121) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1122) 	if (xas_invalid(xas))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1123) 		return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1124) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1125) 	xas->xa_node = XAS_RESTART;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1126) 	if (node) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1127) 		unsigned long offset = xas->xa_offset;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1128) 		while (++offset < XA_CHUNK_SIZE) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1129) 			if (!xa_is_sibling(xa_entry(xas->xa, node, offset)))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1130) 				break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1131) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1132) 		xas->xa_index += (offset - xas->xa_offset) << node->shift;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1133) 		if (xas->xa_index == 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1134) 			xas->xa_node = XAS_BOUNDS;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1135) 	} else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1136) 		xas->xa_index++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1137) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1138) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1139) EXPORT_SYMBOL_GPL(xas_pause);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1140) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1141) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1142)  * __xas_prev() - Find the previous entry in the XArray.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1143)  * @xas: XArray operation state.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1144)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1145)  * Helper function for xas_prev() which handles all the complex cases
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1146)  * out of line.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1147)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1148) void *__xas_prev(struct xa_state *xas)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1149) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1150) 	void *entry;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1151) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1152) 	if (!xas_frozen(xas->xa_node))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1153) 		xas->xa_index--;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1154) 	if (!xas->xa_node)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1155) 		return set_bounds(xas);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1156) 	if (xas_not_node(xas->xa_node))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1157) 		return xas_load(xas);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1158) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1159) 	if (xas->xa_offset != get_offset(xas->xa_index, xas->xa_node))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1160) 		xas->xa_offset--;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1161) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1162) 	while (xas->xa_offset == 255) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1163) 		xas->xa_offset = xas->xa_node->offset - 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1164) 		xas->xa_node = xa_parent(xas->xa, xas->xa_node);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1165) 		if (!xas->xa_node)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1166) 			return set_bounds(xas);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1167) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1168) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1169) 	for (;;) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1170) 		entry = xa_entry(xas->xa, xas->xa_node, xas->xa_offset);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1171) 		if (!xa_is_node(entry))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1172) 			return entry;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1173) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1174) 		xas->xa_node = xa_to_node(entry);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1175) 		xas_set_offset(xas);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1176) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1177) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1178) EXPORT_SYMBOL_GPL(__xas_prev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1179) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1180) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1181)  * __xas_next() - Find the next entry in the XArray.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1182)  * @xas: XArray operation state.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1183)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1184)  * Helper function for xas_next() which handles all the complex cases
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1185)  * out of line.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1186)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1187) void *__xas_next(struct xa_state *xas)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1188) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1189) 	void *entry;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1190) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1191) 	if (!xas_frozen(xas->xa_node))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1192) 		xas->xa_index++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1193) 	if (!xas->xa_node)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1194) 		return set_bounds(xas);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1195) 	if (xas_not_node(xas->xa_node))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1196) 		return xas_load(xas);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1197) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1198) 	if (xas->xa_offset != get_offset(xas->xa_index, xas->xa_node))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1199) 		xas->xa_offset++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1200) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1201) 	while (xas->xa_offset == XA_CHUNK_SIZE) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1202) 		xas->xa_offset = xas->xa_node->offset + 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1203) 		xas->xa_node = xa_parent(xas->xa, xas->xa_node);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1204) 		if (!xas->xa_node)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1205) 			return set_bounds(xas);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1206) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1207) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1208) 	for (;;) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1209) 		entry = xa_entry(xas->xa, xas->xa_node, xas->xa_offset);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1210) 		if (!xa_is_node(entry))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1211) 			return entry;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1212) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1213) 		xas->xa_node = xa_to_node(entry);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1214) 		xas_set_offset(xas);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1215) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1216) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1217) EXPORT_SYMBOL_GPL(__xas_next);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1218) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1219) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1220)  * xas_find() - Find the next present entry in the XArray.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1221)  * @xas: XArray operation state.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1222)  * @max: Highest index to return.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1223)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1224)  * If the @xas has not yet been walked to an entry, return the entry
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1225)  * which has an index >= xas.xa_index.  If it has been walked, the entry
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1226)  * currently being pointed at has been processed, and so we move to the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1227)  * next entry.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1228)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1229)  * If no entry is found and the array is smaller than @max, the iterator
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1230)  * is set to the smallest index not yet in the array.  This allows @xas
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1231)  * to be immediately passed to xas_store().
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1232)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1233)  * Return: The entry, if found, otherwise %NULL.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1234)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1235) void *xas_find(struct xa_state *xas, unsigned long max)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1236) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1237) 	void *entry;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1238) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1239) 	if (xas_error(xas) || xas->xa_node == XAS_BOUNDS)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1240) 		return NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1241) 	if (xas->xa_index > max)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1242) 		return set_bounds(xas);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1243) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1244) 	if (!xas->xa_node) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1245) 		xas->xa_index = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1246) 		return set_bounds(xas);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1247) 	} else if (xas->xa_node == XAS_RESTART) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1248) 		entry = xas_load(xas);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1249) 		if (entry || xas_not_node(xas->xa_node))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1250) 			return entry;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1251) 	} else if (!xas->xa_node->shift &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1252) 		    xas->xa_offset != (xas->xa_index & XA_CHUNK_MASK)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1253) 		xas->xa_offset = ((xas->xa_index - 1) & XA_CHUNK_MASK) + 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1254) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1255) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1256) 	xas_advance(xas);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1257) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1258) 	while (xas->xa_node && (xas->xa_index <= max)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1259) 		if (unlikely(xas->xa_offset == XA_CHUNK_SIZE)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1260) 			xas->xa_offset = xas->xa_node->offset + 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1261) 			xas->xa_node = xa_parent(xas->xa, xas->xa_node);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1262) 			continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1263) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1264) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1265) 		entry = xa_entry(xas->xa, xas->xa_node, xas->xa_offset);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1266) 		if (xa_is_node(entry)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1267) 			xas->xa_node = xa_to_node(entry);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1268) 			xas->xa_offset = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1269) 			continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1270) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1271) 		if (entry && !xa_is_sibling(entry))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1272) 			return entry;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1273) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1274) 		xas_advance(xas);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1275) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1276) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1277) 	if (!xas->xa_node)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1278) 		xas->xa_node = XAS_BOUNDS;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1279) 	return NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1280) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1281) EXPORT_SYMBOL_GPL(xas_find);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1282) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1283) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1284)  * xas_find_marked() - Find the next marked entry in the XArray.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1285)  * @xas: XArray operation state.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1286)  * @max: Highest index to return.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1287)  * @mark: Mark number to search for.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1288)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1289)  * If the @xas has not yet been walked to an entry, return the marked entry
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1290)  * which has an index >= xas.xa_index.  If it has been walked, the entry
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1291)  * currently being pointed at has been processed, and so we return the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1292)  * first marked entry with an index > xas.xa_index.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1293)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1294)  * If no marked entry is found and the array is smaller than @max, @xas is
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1295)  * set to the bounds state and xas->xa_index is set to the smallest index
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1296)  * not yet in the array.  This allows @xas to be immediately passed to
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1297)  * xas_store().
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1298)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1299)  * If no entry is found before @max is reached, @xas is set to the restart
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1300)  * state.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1301)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1302)  * Return: The entry, if found, otherwise %NULL.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1303)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1304) void *xas_find_marked(struct xa_state *xas, unsigned long max, xa_mark_t mark)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1305) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1306) 	bool advance = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1307) 	unsigned int offset;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1308) 	void *entry;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1309) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1310) 	if (xas_error(xas))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1311) 		return NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1312) 	if (xas->xa_index > max)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1313) 		goto max;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1314) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1315) 	if (!xas->xa_node) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1316) 		xas->xa_index = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1317) 		goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1318) 	} else if (xas_top(xas->xa_node)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1319) 		advance = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1320) 		entry = xa_head(xas->xa);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1321) 		xas->xa_node = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1322) 		if (xas->xa_index > max_index(entry))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1323) 			goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1324) 		if (!xa_is_node(entry)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1325) 			if (xa_marked(xas->xa, mark))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1326) 				return entry;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1327) 			xas->xa_index = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1328) 			goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1329) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1330) 		xas->xa_node = xa_to_node(entry);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1331) 		xas->xa_offset = xas->xa_index >> xas->xa_node->shift;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1332) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1333) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1334) 	while (xas->xa_index <= max) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1335) 		if (unlikely(xas->xa_offset == XA_CHUNK_SIZE)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1336) 			xas->xa_offset = xas->xa_node->offset + 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1337) 			xas->xa_node = xa_parent(xas->xa, xas->xa_node);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1338) 			if (!xas->xa_node)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1339) 				break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1340) 			advance = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1341) 			continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1342) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1343) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1344) 		if (!advance) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1345) 			entry = xa_entry(xas->xa, xas->xa_node, xas->xa_offset);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1346) 			if (xa_is_sibling(entry)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1347) 				xas->xa_offset = xa_to_sibling(entry);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1348) 				xas_move_index(xas, xas->xa_offset);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1349) 			}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1350) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1351) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1352) 		offset = xas_find_chunk(xas, advance, mark);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1353) 		if (offset > xas->xa_offset) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1354) 			advance = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1355) 			xas_move_index(xas, offset);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1356) 			/* Mind the wrap */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1357) 			if ((xas->xa_index - 1) >= max)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1358) 				goto max;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1359) 			xas->xa_offset = offset;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1360) 			if (offset == XA_CHUNK_SIZE)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1361) 				continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1362) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1363) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1364) 		entry = xa_entry(xas->xa, xas->xa_node, xas->xa_offset);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1365) 		if (!entry && !(xa_track_free(xas->xa) && mark == XA_FREE_MARK))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1366) 			continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1367) 		if (!xa_is_node(entry))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1368) 			return entry;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1369) 		xas->xa_node = xa_to_node(entry);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1370) 		xas_set_offset(xas);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1371) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1372) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1373) out:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1374) 	if (xas->xa_index > max)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1375) 		goto max;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1376) 	return set_bounds(xas);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1377) max:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1378) 	xas->xa_node = XAS_RESTART;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1379) 	return NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1380) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1381) EXPORT_SYMBOL_GPL(xas_find_marked);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1382) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1383) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1384)  * xas_find_conflict() - Find the next present entry in a range.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1385)  * @xas: XArray operation state.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1386)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1387)  * The @xas describes both a range and a position within that range.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1388)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1389)  * Context: Any context.  Expects xa_lock to be held.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1390)  * Return: The next entry in the range covered by @xas or %NULL.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1391)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1392) void *xas_find_conflict(struct xa_state *xas)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1393) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1394) 	void *curr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1395) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1396) 	if (xas_error(xas))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1397) 		return NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1398) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1399) 	if (!xas->xa_node)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1400) 		return NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1401) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1402) 	if (xas_top(xas->xa_node)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1403) 		curr = xas_start(xas);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1404) 		if (!curr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1405) 			return NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1406) 		while (xa_is_node(curr)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1407) 			struct xa_node *node = xa_to_node(curr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1408) 			curr = xas_descend(xas, node);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1409) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1410) 		if (curr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1411) 			return curr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1412) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1413) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1414) 	if (xas->xa_node->shift > xas->xa_shift)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1415) 		return NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1416) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1417) 	for (;;) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1418) 		if (xas->xa_node->shift == xas->xa_shift) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1419) 			if ((xas->xa_offset & xas->xa_sibs) == xas->xa_sibs)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1420) 				break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1421) 		} else if (xas->xa_offset == XA_CHUNK_MASK) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1422) 			xas->xa_offset = xas->xa_node->offset;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1423) 			xas->xa_node = xa_parent_locked(xas->xa, xas->xa_node);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1424) 			if (!xas->xa_node)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1425) 				break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1426) 			continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1427) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1428) 		curr = xa_entry_locked(xas->xa, xas->xa_node, ++xas->xa_offset);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1429) 		if (xa_is_sibling(curr))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1430) 			continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1431) 		while (xa_is_node(curr)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1432) 			xas->xa_node = xa_to_node(curr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1433) 			xas->xa_offset = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1434) 			curr = xa_entry_locked(xas->xa, xas->xa_node, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1435) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1436) 		if (curr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1437) 			return curr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1438) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1439) 	xas->xa_offset -= xas->xa_sibs;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1440) 	return NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1441) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1442) EXPORT_SYMBOL_GPL(xas_find_conflict);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1443) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1444) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1445)  * xa_load() - Load an entry from an XArray.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1446)  * @xa: XArray.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1447)  * @index: index into array.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1448)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1449)  * Context: Any context.  Takes and releases the RCU lock.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1450)  * Return: The entry at @index in @xa.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1451)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1452) void *xa_load(struct xarray *xa, unsigned long index)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1453) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1454) 	XA_STATE(xas, xa, index);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1455) 	void *entry;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1456) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1457) 	rcu_read_lock();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1458) 	do {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1459) 		entry = xas_load(&xas);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1460) 		if (xa_is_zero(entry))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1461) 			entry = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1462) 	} while (xas_retry(&xas, entry));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1463) 	rcu_read_unlock();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1464) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1465) 	return entry;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1466) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1467) EXPORT_SYMBOL(xa_load);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1468) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1469) static void *xas_result(struct xa_state *xas, void *curr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1470) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1471) 	if (xa_is_zero(curr))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1472) 		return NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1473) 	if (xas_error(xas))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1474) 		curr = xas->xa_node;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1475) 	return curr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1476) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1477) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1478) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1479)  * __xa_erase() - Erase this entry from the XArray while locked.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1480)  * @xa: XArray.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1481)  * @index: Index into array.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1482)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1483)  * After this function returns, loading from @index will return %NULL.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1484)  * If the index is part of a multi-index entry, all indices will be erased
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1485)  * and none of the entries will be part of a multi-index entry.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1486)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1487)  * Context: Any context.  Expects xa_lock to be held on entry.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1488)  * Return: The entry which used to be at this index.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1489)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1490) void *__xa_erase(struct xarray *xa, unsigned long index)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1491) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1492) 	XA_STATE(xas, xa, index);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1493) 	return xas_result(&xas, xas_store(&xas, NULL));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1494) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1495) EXPORT_SYMBOL(__xa_erase);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1496) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1497) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1498)  * xa_erase() - Erase this entry from the XArray.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1499)  * @xa: XArray.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1500)  * @index: Index of entry.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1501)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1502)  * After this function returns, loading from @index will return %NULL.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1503)  * If the index is part of a multi-index entry, all indices will be erased
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1504)  * and none of the entries will be part of a multi-index entry.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1505)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1506)  * Context: Any context.  Takes and releases the xa_lock.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1507)  * Return: The entry which used to be at this index.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1508)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1509) void *xa_erase(struct xarray *xa, unsigned long index)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1510) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1511) 	void *entry;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1512) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1513) 	xa_lock(xa);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1514) 	entry = __xa_erase(xa, index);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1515) 	xa_unlock(xa);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1516) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1517) 	return entry;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1518) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1519) EXPORT_SYMBOL(xa_erase);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1520) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1521) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1522)  * __xa_store() - Store this entry in the XArray.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1523)  * @xa: XArray.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1524)  * @index: Index into array.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1525)  * @entry: New entry.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1526)  * @gfp: Memory allocation flags.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1527)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1528)  * You must already be holding the xa_lock when calling this function.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1529)  * It will drop the lock if needed to allocate memory, and then reacquire
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1530)  * it afterwards.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1531)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1532)  * Context: Any context.  Expects xa_lock to be held on entry.  May
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1533)  * release and reacquire xa_lock if @gfp flags permit.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1534)  * Return: The old entry at this index or xa_err() if an error happened.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1535)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1536) void *__xa_store(struct xarray *xa, unsigned long index, void *entry, gfp_t gfp)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1537) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1538) 	XA_STATE(xas, xa, index);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1539) 	void *curr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1540) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1541) 	if (WARN_ON_ONCE(xa_is_advanced(entry)))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1542) 		return XA_ERROR(-EINVAL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1543) 	if (xa_track_free(xa) && !entry)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1544) 		entry = XA_ZERO_ENTRY;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1545) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1546) 	do {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1547) 		curr = xas_store(&xas, entry);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1548) 		if (xa_track_free(xa))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1549) 			xas_clear_mark(&xas, XA_FREE_MARK);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1550) 	} while (__xas_nomem(&xas, gfp));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1551) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1552) 	return xas_result(&xas, curr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1553) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1554) EXPORT_SYMBOL(__xa_store);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1555) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1556) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1557)  * xa_store() - Store this entry in the XArray.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1558)  * @xa: XArray.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1559)  * @index: Index into array.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1560)  * @entry: New entry.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1561)  * @gfp: Memory allocation flags.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1562)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1563)  * After this function returns, loads from this index will return @entry.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1564)  * Storing into an existing multi-index entry updates the entry of every index.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1565)  * The marks associated with @index are unaffected unless @entry is %NULL.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1566)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1567)  * Context: Any context.  Takes and releases the xa_lock.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1568)  * May sleep if the @gfp flags permit.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1569)  * Return: The old entry at this index on success, xa_err(-EINVAL) if @entry
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1570)  * cannot be stored in an XArray, or xa_err(-ENOMEM) if memory allocation
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1571)  * failed.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1572)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1573) void *xa_store(struct xarray *xa, unsigned long index, void *entry, gfp_t gfp)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1574) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1575) 	void *curr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1576) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1577) 	xa_lock(xa);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1578) 	curr = __xa_store(xa, index, entry, gfp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1579) 	xa_unlock(xa);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1580) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1581) 	return curr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1582) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1583) EXPORT_SYMBOL(xa_store);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1584) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1585) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1586)  * __xa_cmpxchg() - Store this entry in the XArray.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1587)  * @xa: XArray.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1588)  * @index: Index into array.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1589)  * @old: Old value to test against.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1590)  * @entry: New entry.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1591)  * @gfp: Memory allocation flags.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1592)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1593)  * You must already be holding the xa_lock when calling this function.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1594)  * It will drop the lock if needed to allocate memory, and then reacquire
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1595)  * it afterwards.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1596)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1597)  * Context: Any context.  Expects xa_lock to be held on entry.  May
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1598)  * release and reacquire xa_lock if @gfp flags permit.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1599)  * Return: The old entry at this index or xa_err() if an error happened.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1600)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1601) void *__xa_cmpxchg(struct xarray *xa, unsigned long index,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1602) 			void *old, void *entry, gfp_t gfp)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1603) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1604) 	XA_STATE(xas, xa, index);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1605) 	void *curr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1606) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1607) 	if (WARN_ON_ONCE(xa_is_advanced(entry)))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1608) 		return XA_ERROR(-EINVAL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1609) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1610) 	do {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1611) 		curr = xas_load(&xas);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1612) 		if (curr == old) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1613) 			xas_store(&xas, entry);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1614) 			if (xa_track_free(xa) && entry && !curr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1615) 				xas_clear_mark(&xas, XA_FREE_MARK);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1616) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1617) 	} while (__xas_nomem(&xas, gfp));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1618) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1619) 	return xas_result(&xas, curr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1620) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1621) EXPORT_SYMBOL(__xa_cmpxchg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1622) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1623) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1624)  * __xa_insert() - Store this entry in the XArray if no entry is present.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1625)  * @xa: XArray.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1626)  * @index: Index into array.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1627)  * @entry: New entry.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1628)  * @gfp: Memory allocation flags.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1629)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1630)  * Inserting a NULL entry will store a reserved entry (like xa_reserve())
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1631)  * if no entry is present.  Inserting will fail if a reserved entry is
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1632)  * present, even though loading from this index will return NULL.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1633)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1634)  * Context: Any context.  Expects xa_lock to be held on entry.  May
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1635)  * release and reacquire xa_lock if @gfp flags permit.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1636)  * Return: 0 if the store succeeded.  -EBUSY if another entry was present.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1637)  * -ENOMEM if memory could not be allocated.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1638)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1639) int __xa_insert(struct xarray *xa, unsigned long index, void *entry, gfp_t gfp)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1640) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1641) 	XA_STATE(xas, xa, index);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1642) 	void *curr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1643) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1644) 	if (WARN_ON_ONCE(xa_is_advanced(entry)))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1645) 		return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1646) 	if (!entry)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1647) 		entry = XA_ZERO_ENTRY;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1648) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1649) 	do {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1650) 		curr = xas_load(&xas);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1651) 		if (!curr) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1652) 			xas_store(&xas, entry);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1653) 			if (xa_track_free(xa))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1654) 				xas_clear_mark(&xas, XA_FREE_MARK);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1655) 		} else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1656) 			xas_set_err(&xas, -EBUSY);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1657) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1658) 	} while (__xas_nomem(&xas, gfp));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1659) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1660) 	return xas_error(&xas);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1661) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1662) EXPORT_SYMBOL(__xa_insert);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1663) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1664) #ifdef CONFIG_XARRAY_MULTI
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1665) static void xas_set_range(struct xa_state *xas, unsigned long first,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1666) 		unsigned long last)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1667) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1668) 	unsigned int shift = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1669) 	unsigned long sibs = last - first;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1670) 	unsigned int offset = XA_CHUNK_MASK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1671) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1672) 	xas_set(xas, first);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1673) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1674) 	while ((first & XA_CHUNK_MASK) == 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1675) 		if (sibs < XA_CHUNK_MASK)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1676) 			break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1677) 		if ((sibs == XA_CHUNK_MASK) && (offset < XA_CHUNK_MASK))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1678) 			break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1679) 		shift += XA_CHUNK_SHIFT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1680) 		if (offset == XA_CHUNK_MASK)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1681) 			offset = sibs & XA_CHUNK_MASK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1682) 		sibs >>= XA_CHUNK_SHIFT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1683) 		first >>= XA_CHUNK_SHIFT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1684) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1685) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1686) 	offset = first & XA_CHUNK_MASK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1687) 	if (offset + sibs > XA_CHUNK_MASK)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1688) 		sibs = XA_CHUNK_MASK - offset;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1689) 	if ((((first + sibs + 1) << shift) - 1) > last)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1690) 		sibs -= 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1691) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1692) 	xas->xa_shift = shift;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1693) 	xas->xa_sibs = sibs;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1694) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1695) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1696) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1697)  * xa_store_range() - Store this entry at a range of indices in the XArray.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1698)  * @xa: XArray.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1699)  * @first: First index to affect.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1700)  * @last: Last index to affect.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1701)  * @entry: New entry.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1702)  * @gfp: Memory allocation flags.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1703)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1704)  * After this function returns, loads from any index between @first and @last,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1705)  * inclusive will return @entry.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1706)  * Storing into an existing multi-index entry updates the entry of every index.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1707)  * The marks associated with @index are unaffected unless @entry is %NULL.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1708)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1709)  * Context: Process context.  Takes and releases the xa_lock.  May sleep
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1710)  * if the @gfp flags permit.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1711)  * Return: %NULL on success, xa_err(-EINVAL) if @entry cannot be stored in
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1712)  * an XArray, or xa_err(-ENOMEM) if memory allocation failed.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1713)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1714) void *xa_store_range(struct xarray *xa, unsigned long first,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1715) 		unsigned long last, void *entry, gfp_t gfp)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1716) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1717) 	XA_STATE(xas, xa, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1718) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1719) 	if (WARN_ON_ONCE(xa_is_internal(entry)))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1720) 		return XA_ERROR(-EINVAL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1721) 	if (last < first)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1722) 		return XA_ERROR(-EINVAL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1723) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1724) 	do {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1725) 		xas_lock(&xas);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1726) 		if (entry) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1727) 			unsigned int order = BITS_PER_LONG;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1728) 			if (last + 1)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1729) 				order = __ffs(last + 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1730) 			xas_set_order(&xas, last, order);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1731) 			xas_create(&xas, true);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1732) 			if (xas_error(&xas))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1733) 				goto unlock;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1734) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1735) 		do {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1736) 			xas_set_range(&xas, first, last);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1737) 			xas_store(&xas, entry);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1738) 			if (xas_error(&xas))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1739) 				goto unlock;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1740) 			first += xas_size(&xas);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1741) 		} while (first <= last);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1742) unlock:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1743) 		xas_unlock(&xas);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1744) 	} while (xas_nomem(&xas, gfp));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1745) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1746) 	return xas_result(&xas, NULL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1747) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1748) EXPORT_SYMBOL(xa_store_range);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1749) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1750) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1751)  * xa_get_order() - Get the order of an entry.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1752)  * @xa: XArray.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1753)  * @index: Index of the entry.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1754)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1755)  * Return: A number between 0 and 63 indicating the order of the entry.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1756)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1757) int xa_get_order(struct xarray *xa, unsigned long index)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1758) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1759) 	XA_STATE(xas, xa, index);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1760) 	void *entry;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1761) 	int order = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1762) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1763) 	rcu_read_lock();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1764) 	entry = xas_load(&xas);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1765) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1766) 	if (!entry)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1767) 		goto unlock;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1768) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1769) 	if (!xas.xa_node)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1770) 		goto unlock;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1771) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1772) 	for (;;) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1773) 		unsigned int slot = xas.xa_offset + (1 << order);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1774) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1775) 		if (slot >= XA_CHUNK_SIZE)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1776) 			break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1777) 		if (!xa_is_sibling(xas.xa_node->slots[slot]))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1778) 			break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1779) 		order++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1780) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1781) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1782) 	order += xas.xa_node->shift;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1783) unlock:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1784) 	rcu_read_unlock();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1785) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1786) 	return order;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1787) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1788) EXPORT_SYMBOL(xa_get_order);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1789) #endif /* CONFIG_XARRAY_MULTI */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1790) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1791) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1792)  * __xa_alloc() - Find somewhere to store this entry in the XArray.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1793)  * @xa: XArray.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1794)  * @id: Pointer to ID.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1795)  * @limit: Range for allocated ID.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1796)  * @entry: New entry.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1797)  * @gfp: Memory allocation flags.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1798)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1799)  * Finds an empty entry in @xa between @limit.min and @limit.max,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1800)  * stores the index into the @id pointer, then stores the entry at
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1801)  * that index.  A concurrent lookup will not see an uninitialised @id.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1802)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1803)  * Context: Any context.  Expects xa_lock to be held on entry.  May
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1804)  * release and reacquire xa_lock if @gfp flags permit.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1805)  * Return: 0 on success, -ENOMEM if memory could not be allocated or
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1806)  * -EBUSY if there are no free entries in @limit.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1807)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1808) int __xa_alloc(struct xarray *xa, u32 *id, void *entry,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1809) 		struct xa_limit limit, gfp_t gfp)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1810) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1811) 	XA_STATE(xas, xa, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1812) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1813) 	if (WARN_ON_ONCE(xa_is_advanced(entry)))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1814) 		return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1815) 	if (WARN_ON_ONCE(!xa_track_free(xa)))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1816) 		return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1817) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1818) 	if (!entry)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1819) 		entry = XA_ZERO_ENTRY;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1820) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1821) 	do {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1822) 		xas.xa_index = limit.min;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1823) 		xas_find_marked(&xas, limit.max, XA_FREE_MARK);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1824) 		if (xas.xa_node == XAS_RESTART)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1825) 			xas_set_err(&xas, -EBUSY);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1826) 		else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1827) 			*id = xas.xa_index;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1828) 		xas_store(&xas, entry);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1829) 		xas_clear_mark(&xas, XA_FREE_MARK);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1830) 	} while (__xas_nomem(&xas, gfp));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1831) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1832) 	return xas_error(&xas);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1833) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1834) EXPORT_SYMBOL(__xa_alloc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1835) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1836) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1837)  * __xa_alloc_cyclic() - Find somewhere to store this entry in the XArray.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1838)  * @xa: XArray.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1839)  * @id: Pointer to ID.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1840)  * @entry: New entry.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1841)  * @limit: Range of allocated ID.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1842)  * @next: Pointer to next ID to allocate.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1843)  * @gfp: Memory allocation flags.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1844)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1845)  * Finds an empty entry in @xa between @limit.min and @limit.max,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1846)  * stores the index into the @id pointer, then stores the entry at
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1847)  * that index.  A concurrent lookup will not see an uninitialised @id.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1848)  * The search for an empty entry will start at @next and will wrap
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1849)  * around if necessary.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1850)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1851)  * Context: Any context.  Expects xa_lock to be held on entry.  May
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1852)  * release and reacquire xa_lock if @gfp flags permit.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1853)  * Return: 0 if the allocation succeeded without wrapping.  1 if the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1854)  * allocation succeeded after wrapping, -ENOMEM if memory could not be
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1855)  * allocated or -EBUSY if there are no free entries in @limit.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1856)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1857) int __xa_alloc_cyclic(struct xarray *xa, u32 *id, void *entry,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1858) 		struct xa_limit limit, u32 *next, gfp_t gfp)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1859) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1860) 	u32 min = limit.min;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1861) 	int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1862) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1863) 	limit.min = max(min, *next);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1864) 	ret = __xa_alloc(xa, id, entry, limit, gfp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1865) 	if ((xa->xa_flags & XA_FLAGS_ALLOC_WRAPPED) && ret == 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1866) 		xa->xa_flags &= ~XA_FLAGS_ALLOC_WRAPPED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1867) 		ret = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1868) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1869) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1870) 	if (ret < 0 && limit.min > min) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1871) 		limit.min = min;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1872) 		ret = __xa_alloc(xa, id, entry, limit, gfp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1873) 		if (ret == 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1874) 			ret = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1875) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1876) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1877) 	if (ret >= 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1878) 		*next = *id + 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1879) 		if (*next == 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1880) 			xa->xa_flags |= XA_FLAGS_ALLOC_WRAPPED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1881) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1882) 	return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1883) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1884) EXPORT_SYMBOL(__xa_alloc_cyclic);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1885) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1886) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1887)  * __xa_set_mark() - Set this mark on this entry while locked.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1888)  * @xa: XArray.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1889)  * @index: Index of entry.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1890)  * @mark: Mark number.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1891)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1892)  * Attempting to set a mark on a %NULL entry does not succeed.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1893)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1894)  * Context: Any context.  Expects xa_lock to be held on entry.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1895)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1896) void __xa_set_mark(struct xarray *xa, unsigned long index, xa_mark_t mark)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1897) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1898) 	XA_STATE(xas, xa, index);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1899) 	void *entry = xas_load(&xas);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1900) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1901) 	if (entry)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1902) 		xas_set_mark(&xas, mark);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1903) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1904) EXPORT_SYMBOL(__xa_set_mark);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1905) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1906) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1907)  * __xa_clear_mark() - Clear this mark on this entry while locked.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1908)  * @xa: XArray.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1909)  * @index: Index of entry.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1910)  * @mark: Mark number.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1911)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1912)  * Context: Any context.  Expects xa_lock to be held on entry.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1913)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1914) void __xa_clear_mark(struct xarray *xa, unsigned long index, xa_mark_t mark)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1915) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1916) 	XA_STATE(xas, xa, index);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1917) 	void *entry = xas_load(&xas);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1918) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1919) 	if (entry)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1920) 		xas_clear_mark(&xas, mark);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1921) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1922) EXPORT_SYMBOL(__xa_clear_mark);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1923) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1924) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1925)  * xa_get_mark() - Inquire whether this mark is set on this entry.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1926)  * @xa: XArray.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1927)  * @index: Index of entry.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1928)  * @mark: Mark number.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1929)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1930)  * This function uses the RCU read lock, so the result may be out of date
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1931)  * by the time it returns.  If you need the result to be stable, use a lock.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1932)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1933)  * Context: Any context.  Takes and releases the RCU lock.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1934)  * Return: True if the entry at @index has this mark set, false if it doesn't.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1935)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1936) bool xa_get_mark(struct xarray *xa, unsigned long index, xa_mark_t mark)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1937) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1938) 	XA_STATE(xas, xa, index);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1939) 	void *entry;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1940) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1941) 	rcu_read_lock();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1942) 	entry = xas_start(&xas);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1943) 	while (xas_get_mark(&xas, mark)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1944) 		if (!xa_is_node(entry))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1945) 			goto found;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1946) 		entry = xas_descend(&xas, xa_to_node(entry));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1947) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1948) 	rcu_read_unlock();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1949) 	return false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1950)  found:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1951) 	rcu_read_unlock();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1952) 	return true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1953) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1954) EXPORT_SYMBOL(xa_get_mark);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1955) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1956) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1957)  * xa_set_mark() - Set this mark on this entry.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1958)  * @xa: XArray.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1959)  * @index: Index of entry.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1960)  * @mark: Mark number.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1961)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1962)  * Attempting to set a mark on a %NULL entry does not succeed.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1963)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1964)  * Context: Process context.  Takes and releases the xa_lock.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1965)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1966) void xa_set_mark(struct xarray *xa, unsigned long index, xa_mark_t mark)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1967) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1968) 	xa_lock(xa);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1969) 	__xa_set_mark(xa, index, mark);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1970) 	xa_unlock(xa);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1971) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1972) EXPORT_SYMBOL(xa_set_mark);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1973) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1974) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1975)  * xa_clear_mark() - Clear this mark on this entry.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1976)  * @xa: XArray.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1977)  * @index: Index of entry.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1978)  * @mark: Mark number.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1979)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1980)  * Clearing a mark always succeeds.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1981)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1982)  * Context: Process context.  Takes and releases the xa_lock.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1983)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1984) void xa_clear_mark(struct xarray *xa, unsigned long index, xa_mark_t mark)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1985) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1986) 	xa_lock(xa);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1987) 	__xa_clear_mark(xa, index, mark);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1988) 	xa_unlock(xa);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1989) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1990) EXPORT_SYMBOL(xa_clear_mark);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1991) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1992) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1993)  * xa_find() - Search the XArray for an entry.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1994)  * @xa: XArray.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1995)  * @indexp: Pointer to an index.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1996)  * @max: Maximum index to search to.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1997)  * @filter: Selection criterion.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1998)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1999)  * Finds the entry in @xa which matches the @filter, and has the lowest
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2000)  * index that is at least @indexp and no more than @max.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2001)  * If an entry is found, @indexp is updated to be the index of the entry.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2002)  * This function is protected by the RCU read lock, so it may not find
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2003)  * entries which are being simultaneously added.  It will not return an
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2004)  * %XA_RETRY_ENTRY; if you need to see retry entries, use xas_find().
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2005)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2006)  * Context: Any context.  Takes and releases the RCU lock.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2007)  * Return: The entry, if found, otherwise %NULL.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2008)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2009) void *xa_find(struct xarray *xa, unsigned long *indexp,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2010) 			unsigned long max, xa_mark_t filter)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2011) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2012) 	XA_STATE(xas, xa, *indexp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2013) 	void *entry;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2014) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2015) 	rcu_read_lock();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2016) 	do {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2017) 		if ((__force unsigned int)filter < XA_MAX_MARKS)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2018) 			entry = xas_find_marked(&xas, max, filter);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2019) 		else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2020) 			entry = xas_find(&xas, max);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2021) 	} while (xas_retry(&xas, entry));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2022) 	rcu_read_unlock();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2023) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2024) 	if (entry)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2025) 		*indexp = xas.xa_index;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2026) 	return entry;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2027) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2028) EXPORT_SYMBOL(xa_find);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2029) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2030) static bool xas_sibling(struct xa_state *xas)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2031) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2032) 	struct xa_node *node = xas->xa_node;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2033) 	unsigned long mask;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2034) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2035) 	if (!IS_ENABLED(CONFIG_XARRAY_MULTI) || !node)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2036) 		return false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2037) 	mask = (XA_CHUNK_SIZE << node->shift) - 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2038) 	return (xas->xa_index & mask) >
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2039) 		((unsigned long)xas->xa_offset << node->shift);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2040) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2041) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2042) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2043)  * xa_find_after() - Search the XArray for a present entry.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2044)  * @xa: XArray.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2045)  * @indexp: Pointer to an index.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2046)  * @max: Maximum index to search to.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2047)  * @filter: Selection criterion.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2048)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2049)  * Finds the entry in @xa which matches the @filter and has the lowest
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2050)  * index that is above @indexp and no more than @max.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2051)  * If an entry is found, @indexp is updated to be the index of the entry.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2052)  * This function is protected by the RCU read lock, so it may miss entries
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2053)  * which are being simultaneously added.  It will not return an
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2054)  * %XA_RETRY_ENTRY; if you need to see retry entries, use xas_find().
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2055)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2056)  * Context: Any context.  Takes and releases the RCU lock.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2057)  * Return: The pointer, if found, otherwise %NULL.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2058)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2059) void *xa_find_after(struct xarray *xa, unsigned long *indexp,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2060) 			unsigned long max, xa_mark_t filter)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2061) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2062) 	XA_STATE(xas, xa, *indexp + 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2063) 	void *entry;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2064) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2065) 	if (xas.xa_index == 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2066) 		return NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2067) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2068) 	rcu_read_lock();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2069) 	for (;;) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2070) 		if ((__force unsigned int)filter < XA_MAX_MARKS)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2071) 			entry = xas_find_marked(&xas, max, filter);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2072) 		else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2073) 			entry = xas_find(&xas, max);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2074) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2075) 		if (xas_invalid(&xas))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2076) 			break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2077) 		if (xas_sibling(&xas))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2078) 			continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2079) 		if (!xas_retry(&xas, entry))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2080) 			break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2081) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2082) 	rcu_read_unlock();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2083) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2084) 	if (entry)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2085) 		*indexp = xas.xa_index;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2086) 	return entry;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2087) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2088) EXPORT_SYMBOL(xa_find_after);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2089) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2090) static unsigned int xas_extract_present(struct xa_state *xas, void **dst,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2091) 			unsigned long max, unsigned int n)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2092) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2093) 	void *entry;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2094) 	unsigned int i = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2095) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2096) 	rcu_read_lock();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2097) 	xas_for_each(xas, entry, max) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2098) 		if (xas_retry(xas, entry))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2099) 			continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2100) 		dst[i++] = entry;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2101) 		if (i == n)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2102) 			break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2103) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2104) 	rcu_read_unlock();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2105) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2106) 	return i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2107) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2108) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2109) static unsigned int xas_extract_marked(struct xa_state *xas, void **dst,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2110) 			unsigned long max, unsigned int n, xa_mark_t mark)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2111) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2112) 	void *entry;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2113) 	unsigned int i = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2114) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2115) 	rcu_read_lock();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2116) 	xas_for_each_marked(xas, entry, max, mark) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2117) 		if (xas_retry(xas, entry))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2118) 			continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2119) 		dst[i++] = entry;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2120) 		if (i == n)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2121) 			break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2122) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2123) 	rcu_read_unlock();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2124) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2125) 	return i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2126) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2127) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2128) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2129)  * xa_extract() - Copy selected entries from the XArray into a normal array.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2130)  * @xa: The source XArray to copy from.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2131)  * @dst: The buffer to copy entries into.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2132)  * @start: The first index in the XArray eligible to be selected.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2133)  * @max: The last index in the XArray eligible to be selected.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2134)  * @n: The maximum number of entries to copy.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2135)  * @filter: Selection criterion.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2136)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2137)  * Copies up to @n entries that match @filter from the XArray.  The
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2138)  * copied entries will have indices between @start and @max, inclusive.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2139)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2140)  * The @filter may be an XArray mark value, in which case entries which are
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2141)  * marked with that mark will be copied.  It may also be %XA_PRESENT, in
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2142)  * which case all entries which are not %NULL will be copied.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2143)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2144)  * The entries returned may not represent a snapshot of the XArray at a
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2145)  * moment in time.  For example, if another thread stores to index 5, then
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2146)  * index 10, calling xa_extract() may return the old contents of index 5
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2147)  * and the new contents of index 10.  Indices not modified while this
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2148)  * function is running will not be skipped.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2149)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2150)  * If you need stronger guarantees, holding the xa_lock across calls to this
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2151)  * function will prevent concurrent modification.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2152)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2153)  * Context: Any context.  Takes and releases the RCU lock.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2154)  * Return: The number of entries copied.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2155)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2156) unsigned int xa_extract(struct xarray *xa, void **dst, unsigned long start,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2157) 			unsigned long max, unsigned int n, xa_mark_t filter)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2158) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2159) 	XA_STATE(xas, xa, start);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2160) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2161) 	if (!n)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2162) 		return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2163) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2164) 	if ((__force unsigned int)filter < XA_MAX_MARKS)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2165) 		return xas_extract_marked(&xas, dst, max, n, filter);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2166) 	return xas_extract_present(&xas, dst, max, n);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2167) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2168) EXPORT_SYMBOL(xa_extract);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2169) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2170) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2171)  * xa_delete_node() - Private interface for workingset code.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2172)  * @node: Node to be removed from the tree.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2173)  * @update: Function to call to update ancestor nodes.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2174)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2175)  * Context: xa_lock must be held on entry and will not be released.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2176)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2177) void xa_delete_node(struct xa_node *node, xa_update_node_t update)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2178) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2179) 	struct xa_state xas = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2180) 		.xa = node->array,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2181) 		.xa_index = (unsigned long)node->offset <<
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2182) 				(node->shift + XA_CHUNK_SHIFT),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2183) 		.xa_shift = node->shift + XA_CHUNK_SHIFT,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2184) 		.xa_offset = node->offset,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2185) 		.xa_node = xa_parent_locked(node->array, node),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2186) 		.xa_update = update,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2187) 	};
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2188) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2189) 	xas_store(&xas, NULL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2190) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2191) EXPORT_SYMBOL_GPL(xa_delete_node);	/* For the benefit of the test suite */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2192) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2193) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2194)  * xa_destroy() - Free all internal data structures.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2195)  * @xa: XArray.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2196)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2197)  * After calling this function, the XArray is empty and has freed all memory
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2198)  * allocated for its internal data structures.  You are responsible for
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2199)  * freeing the objects referenced by the XArray.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2200)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2201)  * Context: Any context.  Takes and releases the xa_lock, interrupt-safe.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2202)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2203) void xa_destroy(struct xarray *xa)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2204) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2205) 	XA_STATE(xas, xa, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2206) 	unsigned long flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2207) 	void *entry;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2208) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2209) 	xas.xa_node = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2210) 	xas_lock_irqsave(&xas, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2211) 	entry = xa_head_locked(xa);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2212) 	RCU_INIT_POINTER(xa->xa_head, NULL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2213) 	xas_init_marks(&xas);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2214) 	if (xa_zero_busy(xa))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2215) 		xa_mark_clear(xa, XA_FREE_MARK);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2216) 	/* lockdep checks we're still holding the lock in xas_free_nodes() */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2217) 	if (xa_is_node(entry))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2218) 		xas_free_nodes(&xas, xa_to_node(entry));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2219) 	xas_unlock_irqrestore(&xas, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2220) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2221) EXPORT_SYMBOL(xa_destroy);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2222) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2223) #ifdef XA_DEBUG
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2224) void xa_dump_node(const struct xa_node *node)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2225) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2226) 	unsigned i, j;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2227) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2228) 	if (!node)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2229) 		return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2230) 	if ((unsigned long)node & 3) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2231) 		pr_cont("node %px\n", node);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2232) 		return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2233) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2234) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2235) 	pr_cont("node %px %s %d parent %px shift %d count %d values %d "
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2236) 		"array %px list %px %px marks",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2237) 		node, node->parent ? "offset" : "max", node->offset,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2238) 		node->parent, node->shift, node->count, node->nr_values,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2239) 		node->array, node->private_list.prev, node->private_list.next);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2240) 	for (i = 0; i < XA_MAX_MARKS; i++)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2241) 		for (j = 0; j < XA_MARK_LONGS; j++)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2242) 			pr_cont(" %lx", node->marks[i][j]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2243) 	pr_cont("\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2244) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2245) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2246) void xa_dump_index(unsigned long index, unsigned int shift)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2247) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2248) 	if (!shift)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2249) 		pr_info("%lu: ", index);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2250) 	else if (shift >= BITS_PER_LONG)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2251) 		pr_info("0-%lu: ", ~0UL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2252) 	else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2253) 		pr_info("%lu-%lu: ", index, index | ((1UL << shift) - 1));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2254) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2255) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2256) void xa_dump_entry(const void *entry, unsigned long index, unsigned long shift)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2257) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2258) 	if (!entry)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2259) 		return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2260) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2261) 	xa_dump_index(index, shift);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2262) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2263) 	if (xa_is_node(entry)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2264) 		if (shift == 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2265) 			pr_cont("%px\n", entry);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2266) 		} else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2267) 			unsigned long i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2268) 			struct xa_node *node = xa_to_node(entry);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2269) 			xa_dump_node(node);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2270) 			for (i = 0; i < XA_CHUNK_SIZE; i++)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2271) 				xa_dump_entry(node->slots[i],
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2272) 				      index + (i << node->shift), node->shift);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2273) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2274) 	} else if (xa_is_value(entry))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2275) 		pr_cont("value %ld (0x%lx) [%px]\n", xa_to_value(entry),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2276) 						xa_to_value(entry), entry);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2277) 	else if (!xa_is_internal(entry))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2278) 		pr_cont("%px\n", entry);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2279) 	else if (xa_is_retry(entry))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2280) 		pr_cont("retry (%ld)\n", xa_to_internal(entry));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2281) 	else if (xa_is_sibling(entry))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2282) 		pr_cont("sibling (slot %ld)\n", xa_to_sibling(entry));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2283) 	else if (xa_is_zero(entry))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2284) 		pr_cont("zero (%ld)\n", xa_to_internal(entry));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2285) 	else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2286) 		pr_cont("UNKNOWN ENTRY (%px)\n", entry);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2287) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2288) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2289) void xa_dump(const struct xarray *xa)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2290) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2291) 	void *entry = xa->xa_head;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2292) 	unsigned int shift = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2293) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2294) 	pr_info("xarray: %px head %px flags %x marks %d %d %d\n", xa, entry,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2295) 			xa->xa_flags, xa_marked(xa, XA_MARK_0),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2296) 			xa_marked(xa, XA_MARK_1), xa_marked(xa, XA_MARK_2));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2297) 	if (xa_is_node(entry))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2298) 		shift = xa_to_node(entry)->shift + XA_CHUNK_SHIFT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2299) 	xa_dump_entry(entry, 0, shift);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2300) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2301) #endif