Orange Pi 5 kernel

Deprecated Linux kernel 5.10.110 for the Orange Pi 5/5B/5 Plus boards

3 Commits   0 Branches   0 Tags
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   1) // SPDX-License-Identifier: GPL-2.0-or-later
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   2) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   3)  * Copyright (C) 2017 Imagination Technologies
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   4)  * Author: Paul Burton <paul.burton@mips.com>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   5)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   6) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   7) #include <linux/bitops.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   8) #include <asm/cmpxchg.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   9) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  10) unsigned long __xchg_small(volatile void *ptr, unsigned long val, unsigned int size)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  11) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  12) 	u32 old32, new32, load32, mask;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  13) 	volatile u32 *ptr32;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  14) 	unsigned int shift;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  15) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  16) 	/* Check that ptr is naturally aligned */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  17) 	WARN_ON((unsigned long)ptr & (size - 1));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  18) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  19) 	/* Mask value to the correct size. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  20) 	mask = GENMASK((size * BITS_PER_BYTE) - 1, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  21) 	val &= mask;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  22) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  23) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  24) 	 * Calculate a shift & mask that correspond to the value we wish to
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  25) 	 * exchange within the naturally aligned 4 byte integerthat includes
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  26) 	 * it.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  27) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  28) 	shift = (unsigned long)ptr & 0x3;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  29) 	if (IS_ENABLED(CONFIG_CPU_BIG_ENDIAN))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  30) 		shift ^= sizeof(u32) - size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  31) 	shift *= BITS_PER_BYTE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  32) 	mask <<= shift;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  33) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  34) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  35) 	 * Calculate a pointer to the naturally aligned 4 byte integer that
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  36) 	 * includes our byte of interest, and load its value.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  37) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  38) 	ptr32 = (volatile u32 *)((unsigned long)ptr & ~0x3);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  39) 	load32 = *ptr32;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  40) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  41) 	do {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  42) 		old32 = load32;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  43) 		new32 = (load32 & ~mask) | (val << shift);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  44) 		load32 = cmpxchg(ptr32, old32, new32);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  45) 	} while (load32 != old32);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  46) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  47) 	return (load32 & mask) >> shift;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  48) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  49) 
/*
 * __cmpxchg_small() - emulate a 1- or 2-byte compare-and-exchange.
 * @ptr:  naturally aligned pointer to the 8- or 16-bit location
 * @old:  expected current value
 * @new:  value to store if *@ptr equals @old
 * @size: width of the access in bytes (1 or 2)
 *
 * Builds the sub-word cmpxchg out of a 32-bit cmpxchg() on the
 * naturally aligned word that contains @ptr.
 *
 * Return: the value previously held at @ptr (masked to @size bytes);
 * the exchange took place iff that value equals @old.
 */
unsigned long __cmpxchg_small(volatile void *ptr, unsigned long old,
			      unsigned long new, unsigned int size)
{
	u32 mask, old32, new32, load32, load;
	volatile u32 *ptr32;
	unsigned int shift;

	/* Check that ptr is naturally aligned */
	WARN_ON((unsigned long)ptr & (size - 1));

	/* Mask inputs to the correct size. */
	mask = GENMASK((size * BITS_PER_BYTE) - 1, 0);
	old &= mask;
	new &= mask;

	/*
	 * Calculate a shift & mask that correspond to the value we wish to
	 * compare & exchange within the naturally aligned 4 byte integer
	 * that includes it.
	 */
	shift = (unsigned long)ptr & 0x3;
	if (IS_ENABLED(CONFIG_CPU_BIG_ENDIAN))
		shift ^= sizeof(u32) - size;
	shift *= BITS_PER_BYTE;
	mask <<= shift;

	/*
	 * Calculate a pointer to the naturally aligned 4 byte integer that
	 * includes our byte of interest, and load its value.
	 */
	ptr32 = (volatile u32 *)((unsigned long)ptr & ~0x3);
	load32 = *ptr32;

	while (true) {
		/*
		 * Ensure the byte we want to exchange matches the expected
		 * old value, and if not then bail.
		 */
		load = (load32 & mask) >> shift;
		if (load != old)
			return load;

		/*
		 * Calculate the old & new values of the naturally aligned
		 * 4 byte integer that include the byte we want to exchange.
		 * Attempt to exchange the old value for the new value, and
		 * return if we succeed.
		 */
		old32 = (load32 & ~mask) | (old << shift);
		new32 = (load32 & ~mask) | (new << shift);
		load32 = cmpxchg(ptr32, old32, new32);
		if (load32 == old32)
			return old;

		/*
		 * cmpxchg() observed a different word: some byte of the
		 * containing word changed under us. Loop with the freshly
		 * returned value — the target sub-word is re-checked
		 * against @old at the top of the next iteration, so a
		 * concurrent change to *our* bytes still bails correctly.
		 */
	}
}