Orange Pi5 kernel

Deprecated Linux kernel 5.10.110 for OrangePi 5/5B/5+ boards

// SPDX-License-Identifier: GPL-2.0
/*
 * A fast, small, non-recursive O(n log n) sort for the Linux kernel
 *
 * This performs n*log2(n) + 0.37*n + o(n) comparisons on average,
 * and 1.5*n*log2(n) + O(n) in the (very contrived) worst case.
 *
 * Glibc qsort() manages n*log2(n) - 1.26*n for random inputs (1.63*n
 * better) at the expense of stack usage and much larger code to avoid
 * quicksort's O(n^2) worst case.
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/types.h>
#include <linux/export.h>
#include <linux/sort.h>

/**
 * is_aligned - is this pointer & size okay for word-wide copying?
 * @base: pointer to data
 * @size: size of each element
 * @align: required alignment (typically 4 or 8)
 *
 * Returns true if elements can be copied using word loads and stores.
 * The size must be a multiple of the alignment, and the base address must
 * be if we do not have CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS.
 *
 * For some reason, gcc doesn't know to optimize "if (a & mask || b & mask)"
 * to "if ((a | b) & mask)", so we do that by hand.
 */
__attribute_const__ __always_inline
static bool is_aligned(const void *base, size_t size, unsigned char align)
{
	unsigned char lsbits = (unsigned char)size;

	(void)base;
#ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
	lsbits |= (unsigned char)(uintptr_t)base;
#endif
	return (lsbits & (align - 1)) == 0;
}
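
/*
 * Illustrative example (added, not part of the original file): with
 * align == 4, a 4-byte-aligned base and size == 8 pass (the low two
 * bits of both are clear), while size == 6 fails because 6 & 3 == 2.
 * With CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS only the size is tested,
 * so a base such as 0x1002 would still be accepted.
 */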

/**
 * swap_words_32 - swap two elements in 32-bit chunks
 * @a: pointer to the first element to swap
 * @b: pointer to the second element to swap
 * @n: element size (must be a multiple of 4)
 *
 * Exchange the two objects in memory.  This exploits base+index addressing,
 * which basically all CPUs have, to minimize loop overhead computations.
 *
 * For some reason, on x86 gcc 7.3.0 adds a redundant test of n at the
 * bottom of the loop, even though the zero flag is still valid from the
 * subtract (since the intervening mov instructions don't alter the flags).
 * Gcc 8.1.0 doesn't have that problem.
 */
static void swap_words_32(void *a, void *b, size_t n)
{
	do {
		u32 t = *(u32 *)(a + (n -= 4));
		*(u32 *)(a + n) = *(u32 *)(b + n);
		*(u32 *)(b + n) = t;
	} while (n);
}

/**
 * swap_words_64 - swap two elements in 64-bit chunks
 * @a: pointer to the first element to swap
 * @b: pointer to the second element to swap
 * @n: element size (must be a multiple of 8)
 *
 * Exchange the two objects in memory.  This exploits base+index
 * addressing, which basically all CPUs have, to minimize loop overhead
 * computations.
 *
 * We'd like to use 64-bit loads if possible.  If they're not, emulating
 * one requires base+index+4 addressing which x86 has but most other
 * processors do not.  If CONFIG_64BIT, we definitely have 64-bit loads,
 * but it's possible to have 64-bit loads without 64-bit pointers (e.g.
 * x32 ABI).  Are there any cases the kernel needs to worry about?
 */
static void swap_words_64(void *a, void *b, size_t n)
{
	do {
#ifdef CONFIG_64BIT
		u64 t = *(u64 *)(a + (n -= 8));
		*(u64 *)(a + n) = *(u64 *)(b + n);
		*(u64 *)(b + n) = t;
#else
		/* Use two 32-bit transfers to avoid base+index+4 addressing */
		u32 t = *(u32 *)(a + (n -= 4));
		*(u32 *)(a + n) = *(u32 *)(b + n);
		*(u32 *)(b + n) = t;

		t = *(u32 *)(a + (n -= 4));
		*(u32 *)(a + n) = *(u32 *)(b + n);
		*(u32 *)(b + n) = t;
#endif
	} while (n);
}

/**
 * swap_bytes - swap two elements a byte at a time
 * @a: pointer to the first element to swap
 * @b: pointer to the second element to swap
 * @n: element size
 *
 * This is the fallback if alignment doesn't allow using larger chunks.
 */
static void swap_bytes(void *a, void *b, size_t n)
{
	do {
		char t = ((char *)a)[--n];
		((char *)a)[n] = ((char *)b)[n];
		((char *)b)[n] = t;
	} while (n);
}

/*
 * The values are arbitrary as long as they can't be confused with
 * a pointer, but small integers make for the smallest compare
 * instructions.
 */
#define SWAP_WORDS_64 (swap_func_t)0
#define SWAP_WORDS_32 (swap_func_t)1
#define SWAP_BYTES    (swap_func_t)2

/*
 * The function pointer is last to make tail calls most efficient if the
 * compiler decides not to inline this function.
 */
static void do_swap(void *a, void *b, size_t size, swap_func_t swap_func)
{
	if (swap_func == SWAP_WORDS_64)
		swap_words_64(a, b, size);
	else if (swap_func == SWAP_WORDS_32)
		swap_words_32(a, b, size);
	else if (swap_func == SWAP_BYTES)
		swap_bytes(a, b, size);
	else
		swap_func(a, b, (int)size);
}

#define _CMP_WRAPPER ((cmp_r_func_t)0L)

/*
 * sort() below passes _CMP_WRAPPER as the comparison function and
 * carries the caller's two-argument cmp_func_t through @priv;
 * do_cmp() unwraps it here.
 */
static int do_cmp(const void *a, const void *b, cmp_r_func_t cmp, const void *priv)
{
	if (cmp == _CMP_WRAPPER)
		return ((cmp_func_t)(priv))(a, b);
	return cmp(a, b, priv);
}

/**
 * parent - given the offset of the child, find the offset of the parent.
 * @i: the offset of the heap element whose parent is sought.  Non-zero.
 * @lsbit: a precomputed 1-bit mask, equal to "size & -size"
 * @size: size of each element
 *
 * In terms of array indexes, the parent of element j = @i/@size is simply
 * (j-1)/2.  But when working in byte offsets, we can't use implicit
 * truncation of integer divides.
 *
 * Fortunately, we only need one bit of the quotient, not the full divide.
 * @size has a least significant bit.  That bit will be clear if @i is
 * an even multiple of @size, and set if it's an odd multiple.
 *
 * Logically, we're doing "if (i & lsbit) i -= size;", but since the
 * branch is unpredictable, it's done with a bit of clever branch-free
 * code instead.
 */
__attribute_const__ __always_inline
static size_t parent(size_t i, unsigned int lsbit, size_t size)
{
	i -= size;
	i -= size & -(i & lsbit);
	return i / 2;
}
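
/*
 * Worked example (added illustration, not part of the original file):
 * take size == 12, so lsbit == 4.  The child at index j == 4 sits at
 * byte offset i == 48, and its parent (4-1)/2 == 1 sits at offset 12.
 * parent() computes i = 48 - 12 = 36; 36 & 4 is set (36 is an odd
 * multiple of 12), so i -= 12 gives 24, and 24/2 == 12.  For j == 3
 * (i == 36), i = 36 - 12 = 24; 24 & 4 == 0 (an even multiple), so no
 * correction, and 24/2 == 12 as well.
 */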

/**
 * sort_r - sort an array of elements
 * @base: pointer to data to sort
 * @num: number of elements
 * @size: size of each element
 * @cmp_func: pointer to comparison function
 * @swap_func: pointer to swap function or NULL
 * @priv: third argument passed to comparison function
 *
 * This function does a heapsort on the given array.  You may provide
 * a swap_func function if you need to do something more than a memory
 * copy (e.g. fix up pointers or auxiliary data), but the built-in swap
 * avoids a slow retpoline and so is significantly faster.
 *
 * Sorting time is O(n log n) both on average and worst-case. While
 * quicksort is slightly faster on average, it suffers from exploitable
 * O(n*n) worst-case behavior and extra memory requirements that make
 * it less suitable for kernel use.
 */
void sort_r(void *base, size_t num, size_t size,
	    cmp_r_func_t cmp_func,
	    swap_func_t swap_func,
	    const void *priv)
{
	/* pre-scale counters for performance */
	size_t n = num * size, a = (num/2) * size;
	const unsigned int lsbit = size & -size;  /* Used to find parent */

	if (!a)		/* num < 2 || size == 0 */
		return;

	if (!swap_func) {
		if (is_aligned(base, size, 8))
			swap_func = SWAP_WORDS_64;
		else if (is_aligned(base, size, 4))
			swap_func = SWAP_WORDS_32;
		else
			swap_func = SWAP_BYTES;
	}

	/*
	 * Loop invariants:
	 * 1. elements [a,n) satisfy the heap property (compare greater than
	 *    all of their children),
	 * 2. elements [n,num*size) are sorted, and
	 * 3. a <= b <= c <= d <= n (whenever they are valid).
	 */
	for (;;) {
		size_t b, c, d;

		if (a)			/* Building heap: sift down --a */
			a -= size;
		else if (n -= size)	/* Sorting: Extract root to --n */
			do_swap(base, base + n, size, swap_func);
		else			/* Sort complete */
			break;

		/*
		 * Sift element at "a" down into heap.  This is the
		 * "bottom-up" variant, which significantly reduces
		 * calls to cmp_func(): we find the sift-down path all
		 * the way to the leaves (one compare per level), then
		 * backtrack to find where to insert the target element.
		 *
		 * Because elements tend to sift down close to the leaves,
		 * this uses fewer compares than doing two per level
		 * on the way down.  (A bit more than half as many on
		 * average, 3/4 worst-case.)
		 */
		for (b = a; c = 2*b + size, (d = c + size) < n;)
			b = do_cmp(base + c, base + d, cmp_func, priv) >= 0 ? c : d;
		if (d == n)	/* Special case last leaf with no sibling */
			b = c;

		/* Now backtrack from "b" to the correct location for "a" */
		while (b != a && do_cmp(base + a, base + b, cmp_func, priv) >= 0)
			b = parent(b, lsbit, size);
		c = b;			/* Where "a" belongs */
		while (b != a) {	/* Shift it into place */
			b = parent(b, lsbit, size);
			do_swap(base + b, base + c, size, swap_func);
		}
	}
}
EXPORT_SYMBOL(sort_r);
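
/*
 * Usage sketch (added illustration, not part of the original file; the
 * cmp_by_key() helper and the idx/keys arrays are hypothetical).  The
 * @priv argument lets the comparison read caller context without
 * globals, e.g. sorting an index array by a separate key table:
 *
 *	static int cmp_by_key(const void *a, const void *b, const void *priv)
 *	{
 *		const u32 *keys = priv;
 *		u32 x = keys[*(const u32 *)a], y = keys[*(const u32 *)b];
 *
 *		return (x > y) - (x < y);
 *	}
 *
 *	sort_r(idx, nr, sizeof(idx[0]), cmp_by_key, NULL, keys);
 */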

void sort(void *base, size_t num, size_t size,
	  cmp_func_t cmp_func,
	  swap_func_t swap_func)
{
	return sort_r(base, num, size, _CMP_WRAPPER, swap_func, cmp_func);
}
EXPORT_SYMBOL(sort);
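
/*
 * Example (a minimal sketch, not part of the original file; cmp_s32()
 * is a hypothetical helper):
 *
 *	static int cmp_s32(const void *a, const void *b)
 *	{
 *		s32 x = *(const s32 *)a, y = *(const s32 *)b;
 *
 *		return (x > y) - (x < y);
 *	}
 *
 *	s32 vals[] = { 3, 1, 2 };
 *
 *	sort(vals, ARRAY_SIZE(vals), sizeof(vals[0]), cmp_s32, NULL);
 *
 * Passing a NULL swap_func selects the built-in swap; here the 4-byte,
 * 4-byte-aligned elements resolve to SWAP_WORDS_32.
 */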