Orange Pi 5 kernel

Deprecated Linux kernel 5.10.110 for OrangePi 5/5B/5+ boards

lib/cpumask.c
// SPDX-License-Identifier: GPL-2.0
#include <linux/slab.h>
#include <linux/kernel.h>
#include <linux/bitops.h>
#include <linux/cpumask.h>
#include <linux/export.h>
#include <linux/memblock.h>
#include <linux/numa.h>

/**
 * cpumask_next - get the next cpu in a cpumask
 * @n: the cpu prior to the place to search (ie. return will be > @n)
 * @srcp: the cpumask pointer
 *
 * Returns >= nr_cpu_ids if no further cpus set.
 */
unsigned int cpumask_next(int n, const struct cpumask *srcp)
{
	/* -1 is a legal arg here. */
	if (n != -1)
		cpumask_check(n);
	return find_next_bit(cpumask_bits(srcp), nr_cpumask_bits, n + 1);
}
EXPORT_SYMBOL(cpumask_next);

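/*
 * Example (illustrative sketch, not called anywhere in this file): walking
 * every online cpu by hand with cpumask_next(). Passing -1 yields the first
 * set bit; iteration ends once the return value reaches nr_cpu_ids.
 *
 *	int cpu = -1;
 *
 *	while ((cpu = cpumask_next(cpu, cpu_online_mask)) < nr_cpu_ids)
 *		pr_info("cpu%d is online\n", cpu);
 */
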
/**
 * cpumask_next_and - get the next cpu in *src1p & *src2p
 * @n: the cpu prior to the place to search (ie. return will be > @n)
 * @src1p: the first cpumask pointer
 * @src2p: the second cpumask pointer
 *
 * Returns >= nr_cpu_ids if no further cpus set in both.
 */
int cpumask_next_and(int n, const struct cpumask *src1p,
		     const struct cpumask *src2p)
{
	/* -1 is a legal arg here. */
	if (n != -1)
		cpumask_check(n);
	return find_next_and_bit(cpumask_bits(src1p), cpumask_bits(src2p),
		nr_cpumask_bits, n + 1);
}
EXPORT_SYMBOL(cpumask_next_and);

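/*
 * Example (illustrative sketch): finding the next online cpu that is also in
 * a caller-supplied mask, without materializing the intersection. Here
 * "affinity" and "prev_cpu" are hypothetical names for state the caller holds.
 *
 *	int cpu = cpumask_next_and(prev_cpu, affinity, cpu_online_mask);
 *
 *	if (cpu >= nr_cpu_ids)
 *		cpu = cpumask_first_and(affinity, cpu_online_mask);
 */
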
/**
 * cpumask_any_but - return a "random" cpu in a cpumask, but not this one.
 * @mask: the cpumask to search
 * @cpu: the cpu to ignore.
 *
 * Often used to find any cpu but smp_processor_id() in a mask.
 * Returns >= nr_cpu_ids if no cpus set.
 */
int cpumask_any_but(const struct cpumask *mask, unsigned int cpu)
{
	unsigned int i;

	cpumask_check(cpu);
	for_each_cpu(i, mask)
		if (i != cpu)
			break;
	return i;
}
EXPORT_SYMBOL(cpumask_any_but);

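/*
 * Example (illustrative sketch): picking some other cpu to offload work to.
 * Preemption must be disabled for smp_processor_id() to be stable; "my_work"
 * is a hypothetical struct work_struct.
 *
 *	int target = cpumask_any_but(cpu_online_mask, smp_processor_id());
 *
 *	if (target < nr_cpu_ids)
 *		queue_work_on(target, system_wq, &my_work);
 */
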
/**
 * cpumask_next_wrap - helper to implement for_each_cpu_wrap
 * @n: the cpu prior to the place to search
 * @mask: the cpumask pointer
 * @start: the start point of the iteration
 * @wrap: assume @n crossing @start terminates the iteration
 *
 * Returns >= nr_cpu_ids on completion
 *
 * Note: the @wrap argument is required for the start condition when
 * we cannot assume @start is set in @mask.
 */
int cpumask_next_wrap(int n, const struct cpumask *mask, int start, bool wrap)
{
	int next;

again:
	next = cpumask_next(n, mask);

	if (wrap && n < start && next >= start) {
		return nr_cpumask_bits;

	} else if (next >= nr_cpumask_bits) {
		wrap = true;
		n = -1;
		goto again;
	}

	return next;
}
EXPORT_SYMBOL(cpumask_next_wrap);

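/*
 * Example (illustrative sketch): this helper backs for_each_cpu_wrap() in
 * <linux/cpumask.h>, which expands to roughly the loop below, scanning @mask
 * round-robin from @start and stopping after one full wrap. do_something()
 * is hypothetical.
 *
 *	for (cpu = cpumask_next_wrap(start - 1, mask, start, false);
 *	     cpu < nr_cpumask_bits;
 *	     cpu = cpumask_next_wrap(cpu, mask, start, true))
 *		do_something(cpu);
 */
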
/* These are not inline because of header tangles. */
#ifdef CONFIG_CPUMASK_OFFSTACK
/**
 * alloc_cpumask_var_node - allocate a struct cpumask on a given node
 * @mask: pointer to cpumask_var_t where the cpumask is returned
 * @flags: GFP_ flags
 * @node: memory node from which to allocate, or NUMA_NO_NODE
 *
 * Only defined when CONFIG_CPUMASK_OFFSTACK=y, otherwise is
 * a nop returning a constant 1 (in <linux/cpumask.h>)
 * Returns TRUE if memory allocation succeeded, FALSE otherwise.
 *
 * In addition, mask will be NULL if this fails.  Note that gcc is
 * usually smart enough to know that mask can never be NULL if
 * CONFIG_CPUMASK_OFFSTACK=n, so does code elimination in that case
 * too.
 */
bool alloc_cpumask_var_node(cpumask_var_t *mask, gfp_t flags, int node)
{
	*mask = kmalloc_node(cpumask_size(), flags, node);

#ifdef CONFIG_DEBUG_PER_CPU_MAPS
	if (!*mask) {
		printk(KERN_ERR "=> alloc_cpumask_var: failed!\n");
		dump_stack();
	}
#endif

	return *mask != NULL;
}
EXPORT_SYMBOL(alloc_cpumask_var_node);

bool zalloc_cpumask_var_node(cpumask_var_t *mask, gfp_t flags, int node)
{
	return alloc_cpumask_var_node(mask, flags | __GFP_ZERO, node);
}
EXPORT_SYMBOL(zalloc_cpumask_var_node);

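/*
 * Example (illustrative sketch): allocating a zeroed cpumask near a device's
 * NUMA node, as a driver probe path might. "dev" is a hypothetical
 * struct device *.
 *
 *	cpumask_var_t mask;
 *
 *	if (!zalloc_cpumask_var_node(&mask, GFP_KERNEL, dev_to_node(dev)))
 *		return -ENOMEM;
 *	...
 *	free_cpumask_var(mask);
 */
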
/**
 * alloc_cpumask_var - allocate a struct cpumask
 * @mask: pointer to cpumask_var_t where the cpumask is returned
 * @flags: GFP_ flags
 *
 * Only defined when CONFIG_CPUMASK_OFFSTACK=y, otherwise is
 * a nop returning a constant 1 (in <linux/cpumask.h>).
 *
 * See alloc_cpumask_var_node.
 */
bool alloc_cpumask_var(cpumask_var_t *mask, gfp_t flags)
{
	return alloc_cpumask_var_node(mask, flags, NUMA_NO_NODE);
}
EXPORT_SYMBOL(alloc_cpumask_var);

bool zalloc_cpumask_var(cpumask_var_t *mask, gfp_t flags)
{
	return alloc_cpumask_var(mask, flags | __GFP_ZERO);
}
EXPORT_SYMBOL(zalloc_cpumask_var);

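/*
 * Example (illustrative sketch): the canonical cpumask_var_t pattern. With
 * CONFIG_CPUMASK_OFFSTACK=n the "allocation" is a stack array and always
 * succeeds, so the error check compiles away. "some_mask" is hypothetical.
 *
 *	cpumask_var_t tmp;
 *
 *	if (!zalloc_cpumask_var(&tmp, GFP_KERNEL))
 *		return -ENOMEM;
 *
 *	cpumask_and(tmp, cpu_online_mask, some_mask);
 *	...
 *	free_cpumask_var(tmp);
 */
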
/**
 * alloc_bootmem_cpumask_var - allocate a struct cpumask from the bootmem arena.
 * @mask: pointer to cpumask_var_t where the cpumask is returned
 *
 * Only defined when CONFIG_CPUMASK_OFFSTACK=y, otherwise is
 * a nop (in <linux/cpumask.h>).
 * Either returns an allocated (zero-filled) cpumask, or causes the
 * system to panic.
 */
void __init alloc_bootmem_cpumask_var(cpumask_var_t *mask)
{
	*mask = memblock_alloc(cpumask_size(), SMP_CACHE_BYTES);
	if (!*mask)
		panic("%s: Failed to allocate %u bytes\n", __func__,
		      cpumask_size());
}

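/*
 * Example (illustrative sketch): early-boot use, before the slab allocator
 * is up. Pairs with free_bootmem_cpumask_var() below if the mask is only
 * needed during init. my_early_setup() is a hypothetical init function.
 *
 *	static cpumask_var_t boot_mask;
 *
 *	void __init my_early_setup(void)
 *	{
 *		alloc_bootmem_cpumask_var(&boot_mask);
 *		cpumask_copy(boot_mask, cpu_possible_mask);
 *	}
 */
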
/**
 * free_cpumask_var - frees memory allocated for a struct cpumask.
 * @mask: cpumask to free
 *
 * This is safe on a NULL mask.
 */
void free_cpumask_var(cpumask_var_t mask)
{
	kfree(mask);
}
EXPORT_SYMBOL(free_cpumask_var);

/**
 * free_bootmem_cpumask_var - frees result of alloc_bootmem_cpumask_var
 * @mask: cpumask to free
 */
void __init free_bootmem_cpumask_var(cpumask_var_t mask)
{
	memblock_free_early(__pa(mask), cpumask_size());
}
#endif

/**
 * cpumask_local_spread - select the i'th cpu, local NUMA cpus first
 * @i: index number
 * @node: local numa_node
 *
 * This function selects an online CPU according to a NUMA-aware policy;
 * local cpus are returned first, followed by non-local ones, then it
 * wraps around.
 *
 * It's not very efficient, but useful for setup.
 */
unsigned int cpumask_local_spread(unsigned int i, int node)
{
	int cpu;

	/* Wrap: we always want a cpu. */
	i %= num_online_cpus();

	if (node == NUMA_NO_NODE) {
		for_each_cpu(cpu, cpu_online_mask)
			if (i-- == 0)
				return cpu;
	} else {
		/* NUMA first. */
		for_each_cpu_and(cpu, cpumask_of_node(node), cpu_online_mask)
			if (i-- == 0)
				return cpu;

		for_each_cpu(cpu, cpu_online_mask) {
			/* Skip NUMA nodes, done above. */
			if (cpumask_test_cpu(cpu, cpumask_of_node(node)))
				continue;

			if (i-- == 0)
				return cpu;
		}
	}
	BUG();
}
EXPORT_SYMBOL(cpumask_local_spread);

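/*
 * Example (illustrative sketch): spreading per-queue interrupt affinity so
 * that queues land on cpus near the device first, as network drivers often
 * do at setup. "nvec", "vec" and "dev" are hypothetical.
 *
 *	for (i = 0; i < nvec; i++) {
 *		int cpu = cpumask_local_spread(i, dev_to_node(dev));
 *
 *		irq_set_affinity_hint(vec[i], cpumask_of(cpu));
 *	}
 */
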
static DEFINE_PER_CPU(int, distribute_cpu_mask_prev);

/**
 * cpumask_any_and_distribute - pick an arbitrary cpu from *src1p & *src2p
 * @src1p: the first cpumask pointer
 * @src2p: the second cpumask pointer
 *
 * Iterated calls using the same src1p and src2p will be distributed within
 * their intersection.
 *
 * Returns >= nr_cpu_ids if the intersection is empty.
 */
int cpumask_any_and_distribute(const struct cpumask *src1p,
			       const struct cpumask *src2p)
{
	int next, prev;

	/* NOTE: our first selection will skip 0. */
	prev = __this_cpu_read(distribute_cpu_mask_prev);

	next = cpumask_next_and(prev, src1p, src2p);
	if (next >= nr_cpu_ids)
		next = cpumask_first_and(src1p, src2p);

	if (next < nr_cpu_ids)
		__this_cpu_write(distribute_cpu_mask_prev, next);

	return next;
}
EXPORT_SYMBOL(cpumask_any_and_distribute);
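
/*
 * Example (illustrative sketch): distributing successive selections across
 * the cpus allowed by both a task's affinity and the online mask, instead of
 * always hitting the first eligible cpu. "p" is a hypothetical
 * struct task_struct *.
 *
 *	int cpu = cpumask_any_and_distribute(p->cpus_ptr, cpu_online_mask);
 *
 *	if (cpu < nr_cpu_ids)
 *		wake_up_if_idle(cpu);
 */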