Orange Pi 5 kernel

Deprecated Linux kernel 5.10.110 for OrangePi 5/5B/5+ boards

3 Commits   0 Branches   0 Tags
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   1) /* SPDX-License-Identifier: GPL-2.0 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   2) #ifndef _ASM_GENERIC_DIV64_H
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   3) #define _ASM_GENERIC_DIV64_H
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   4) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   5)  * Copyright (C) 2003 Bernardo Innocenti <bernie@develer.com>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   6)  * Based on former asm-ppc/div64.h and asm-m68knommu/div64.h
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   7)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   8)  * Optimization for constant divisors on 32-bit machines:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   9)  * Copyright (C) 2006-2015 Nicolas Pitre
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  10)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  11)  * The semantics of do_div() are:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  12)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  13)  * uint32_t do_div(uint64_t *n, uint32_t base)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  14)  * {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  15)  * 	uint32_t remainder = *n % base;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  16)  * 	*n = *n / base;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  17)  * 	return remainder;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  18)  * }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  19)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  20)  * NOTE: macro parameter n is evaluated multiple times,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  21)  *       beware of side effects!
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  22)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  23) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  24) #include <linux/types.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  25) #include <linux/compiler.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  26) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  27) #if BITS_PER_LONG == 64
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  28) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  29) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  30)  * do_div - returns 2 values: calculate remainder and update new dividend
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  31)  * @n: uint64_t dividend (will be updated)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  32)  * @base: uint32_t divisor
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  33)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  34)  * Summary:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  35)  * ``uint32_t remainder = n % base;``
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  36)  * ``n = n / base;``
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  37)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  38)  * Return: (uint32_t)remainder
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  39)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  40)  * NOTE: macro parameter @n is evaluated multiple times,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  41)  * beware of side effects!
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  42)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  43) # define do_div(n,base) ({					\
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  44) 	uint32_t __base = (base);				\
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  45) 	uint32_t __rem;						\
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  46) 	__rem = ((uint64_t)(n)) % __base;			\
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  47) 	(n) = ((uint64_t)(n)) / __base;				\
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  48) 	__rem;							\
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  49)  })
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  50) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  51) #elif BITS_PER_LONG == 32
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  52) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  53) #include <linux/log2.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  54) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  55) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  56)  * If the divisor happens to be constant, we determine the appropriate
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  57)  * inverse at compile time to turn the division into a few inline
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  58)  * multiplications which ought to be much faster. And yet only if compiling
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  59)  * with a sufficiently recent gcc version to perform proper 64-bit constant
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  60)  * propagation.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  61)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  62)  * (It is unfortunate that gcc doesn't perform all this internally.)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  63)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  64) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  65) #ifndef __div64_const32_is_OK
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  66) #define __div64_const32_is_OK (__GNUC__ >= 4)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  67) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  68) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  69) #define __div64_const32(n, ___b)					\
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  70) ({									\
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  71) 	/*								\
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  72) 	 * Multiplication by reciprocal of b: n / b = n * (p / b) / p	\
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  73) 	 *								\
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  74) 	 * We rely on the fact that most of this code gets optimized	\
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  75) 	 * away at compile time due to constant propagation and only	\
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  76) 	 * a few multiplication instructions should remain.		\
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  77) 	 * Hence this monstrous macro (static inline doesn't always	\
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  78) 	 * do the trick here).						\
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  79) 	 */								\
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  80) 	uint64_t ___res, ___x, ___t, ___m, ___n = (n);			\
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  81) 	uint32_t ___p, ___bias;						\
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  82) 									\
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  83) 	/* determine MSB of b */					\
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  84) 	___p = 1 << ilog2(___b);					\
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  85) 									\
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  86) 	/* compute m = ((p << 64) + b - 1) / b */			\
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  87) 	___m = (~0ULL / ___b) * ___p;					\
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  88) 	___m += (((~0ULL % ___b + 1) * ___p) + ___b - 1) / ___b;	\
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  89) 									\
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  90) 	/* one less than the dividend with highest result */		\
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  91) 	___x = ~0ULL / ___b * ___b - 1;					\
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  92) 									\
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  93) 	/* test our ___m with res = m * x / (p << 64) */		\
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  94) 	___res = ((___m & 0xffffffff) * (___x & 0xffffffff)) >> 32;	\
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  95) 	___t = ___res += (___m & 0xffffffff) * (___x >> 32);		\
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  96) 	___res += (___x & 0xffffffff) * (___m >> 32);			\
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  97) 	___t = (___res < ___t) ? (1ULL << 32) : 0;			\
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  98) 	___res = (___res >> 32) + ___t;					\
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  99) 	___res += (___m >> 32) * (___x >> 32);				\
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 100) 	___res /= ___p;							\
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 101) 									\
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 102) 	/* Now sanitize and optimize what we've got. */			\
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 103) 	if (~0ULL % (___b / (___b & -___b)) == 0) {			\
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 104) 		/* special case, can be simplified to ... */		\
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 105) 		___n /= (___b & -___b);					\
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 106) 		___m = ~0ULL / (___b / (___b & -___b));			\
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 107) 		___p = 1;						\
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 108) 		___bias = 1;						\
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 109) 	} else if (___res != ___x / ___b) {				\
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 110) 		/*							\
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 111) 		 * We can't get away without a bias to compensate	\
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 112) 		 * for bit truncation errors.  To avoid it we'd need an	\
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 113) 		 * additional bit to represent m which would overflow	\
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 114) 		 * a 64-bit variable.					\
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 115) 		 *							\
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 116) 		 * Instead we do m = p / b and n / b = (n * m + m) / p.	\
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 117) 		 */							\
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 118) 		___bias = 1;						\
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 119) 		/* Compute m = (p << 64) / b */				\
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 120) 		___m = (~0ULL / ___b) * ___p;				\
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 121) 		___m += ((~0ULL % ___b + 1) * ___p) / ___b;		\
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 122) 	} else {							\
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 123) 		/*							\
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 124) 		 * Reduce m / p, and try to clear bit 31 of m when	\
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 125) 		 * possible, otherwise that'll need extra overflow	\
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 126) 		 * handling later.					\
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 127) 		 */							\
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 128) 		uint32_t ___bits = -(___m & -___m);			\
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 129) 		___bits |= ___m >> 32;					\
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 130) 		___bits = (~___bits) << 1;				\
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 131) 		/*							\
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 132) 		 * If ___bits == 0 then setting bit 31 is  unavoidable.	\
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 133) 		 * Simply apply the maximum possible reduction in that	\
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 134) 		 * case. Otherwise the MSB of ___bits indicates the	\
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 135) 		 * best reduction we should apply.			\
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 136) 		 */							\
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 137) 		if (!___bits) {						\
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 138) 			___p /= (___m & -___m);				\
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 139) 			___m /= (___m & -___m);				\
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 140) 		} else {						\
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 141) 			___p >>= ilog2(___bits);			\
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 142) 			___m >>= ilog2(___bits);			\
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 143) 		}							\
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 144) 		/* No bias needed. */					\
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 145) 		___bias = 0;						\
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 146) 	}								\
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 147) 									\
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 148) 	/*								\
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 149) 	 * Now we have a combination of 2 conditions:			\
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 150) 	 *								\
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 151) 	 * 1) whether or not we need to apply a bias, and		\
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 152) 	 *								\
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 153) 	 * 2) whether or not there might be an overflow in the cross	\
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 154) 	 *    product determined by (___m & ((1 << 63) | (1 << 31))).	\
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 155) 	 *								\
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 156) 	 * Select the best way to do (m_bias + m * n) / (1 << 64).	\
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 157) 	 * From now on there will be actual runtime code generated.	\
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 158) 	 */								\
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 159) 	___res = __arch_xprod_64(___m, ___n, ___bias);			\
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 160) 									\
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 161) 	___res /= ___p;							\
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 162) })
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 163) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 164) #ifndef __arch_xprod_64
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 165) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 166)  * Default C implementation for __arch_xprod_64()
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 167)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 168)  * Prototype: uint64_t __arch_xprod_64(const uint64_t m, uint64_t n, bool bias)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 169)  * Semantic:  retval = ((bias ? m : 0) + m * n) >> 64
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 170)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 171)  * The product is a 128-bit value, scaled down to 64 bits.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 172)  * Assuming constant propagation to optimize away unused conditional code.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 173)  * Architectures may provide their own optimized assembly implementation.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 174)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 175) static inline uint64_t __arch_xprod_64(const uint64_t m, uint64_t n, bool bias)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 176) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 177) 	uint32_t m_lo = m;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 178) 	uint32_t m_hi = m >> 32;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 179) 	uint32_t n_lo = n;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 180) 	uint32_t n_hi = n >> 32;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 181) 	uint64_t res;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 182) 	uint32_t res_lo, res_hi, tmp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 183) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 184) 	if (!bias) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 185) 		res = ((uint64_t)m_lo * n_lo) >> 32;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 186) 	} else if (!(m & ((1ULL << 63) | (1ULL << 31)))) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 187) 		/* there can't be any overflow here */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 188) 		res = (m + (uint64_t)m_lo * n_lo) >> 32;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 189) 	} else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 190) 		res = m + (uint64_t)m_lo * n_lo;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 191) 		res_lo = res >> 32;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 192) 		res_hi = (res_lo < m_hi);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 193) 		res = res_lo | ((uint64_t)res_hi << 32);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 194) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 195) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 196) 	if (!(m & ((1ULL << 63) | (1ULL << 31)))) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 197) 		/* there can't be any overflow here */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 198) 		res += (uint64_t)m_lo * n_hi;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 199) 		res += (uint64_t)m_hi * n_lo;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 200) 		res >>= 32;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 201) 	} else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 202) 		res += (uint64_t)m_lo * n_hi;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 203) 		tmp = res >> 32;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 204) 		res += (uint64_t)m_hi * n_lo;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 205) 		res_lo = res >> 32;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 206) 		res_hi = (res_lo < tmp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 207) 		res = res_lo | ((uint64_t)res_hi << 32);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 208) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 209) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 210) 	res += (uint64_t)m_hi * n_hi;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 211) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 212) 	return res;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 213) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 214) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 215) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 216) #ifndef __div64_32
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 217) extern uint32_t __div64_32(uint64_t *dividend, uint32_t divisor);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 218) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 219) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 220) /* The unnecessary pointer compare is there
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 221)  * to check for type safety (n must be 64bit)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 222)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 223) # define do_div(n,base) ({				\
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 224) 	uint32_t __base = (base);			\
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 225) 	uint32_t __rem;					\
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 226) 	(void)(((typeof((n)) *)0) == ((uint64_t *)0));	\
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 227) 	if (__builtin_constant_p(__base) &&		\
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 228) 	    is_power_of_2(__base)) {			\
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 229) 		__rem = (n) & (__base - 1);		\
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 230) 		(n) >>= ilog2(__base);			\
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 231) 	} else if (__div64_const32_is_OK &&		\
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 232) 		   __builtin_constant_p(__base) &&	\
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 233) 		   __base != 0) {			\
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 234) 		uint32_t __res_lo, __n_lo = (n);	\
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 235) 		(n) = __div64_const32(n, __base);	\
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 236) 		/* the remainder can be computed with 32-bit regs */ \
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 237) 		__res_lo = (n);				\
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 238) 		__rem = __n_lo - __res_lo * __base;	\
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 239) 	} else if (likely(((n) >> 32) == 0)) {		\
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 240) 		__rem = (uint32_t)(n) % __base;		\
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 241) 		(n) = (uint32_t)(n) / __base;		\
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 242) 	} else 						\
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 243) 		__rem = __div64_32(&(n), __base);	\
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 244) 	__rem;						\
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 245)  })
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 246) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 247) #else /* BITS_PER_LONG == ?? */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 248) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 249) # error do_div() does not yet support the C64
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 250) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 251) #endif /* BITS_PER_LONG */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 252) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 253) #endif /* _ASM_GENERIC_DIV64_H */