Orange Pi 5 kernel

Deprecated Linux kernel 5.10.110 for OrangePi 5/5B/5+ boards

3 Commits   0 Branches   0 Tags
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   1) // SPDX-License-Identifier: GPL-2.0
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   2) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   3)  * Copyright (C) 2003 Bernardo Innocenti <bernie@develer.com>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   4)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   5)  * Based on former do_div() implementation from asm-parisc/div64.h:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   6)  *	Copyright (C) 1999 Hewlett-Packard Co
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   7)  *	Copyright (C) 1999 David Mosberger-Tang <davidm@hpl.hp.com>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   8)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   9)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  10)  * Generic C version of 64bit/32bit division and modulo, with
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  11)  * 64bit result and 32bit remainder.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  12)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  13)  * The fast case for (n>>32 == 0) is handled inline by do_div().
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  14)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  15)  * Code generated for this function might be very inefficient
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  16)  * for some CPUs. __div64_32() can be overridden by linking arch-specific
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  17)  * assembly versions such as arch/ppc/lib/div64.S and arch/sh/lib/div64.S
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  18)  * or by defining a preprocessor macro in arch/include/asm/div64.h.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  19)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  20) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  21) #include <linux/export.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  22) #include <linux/kernel.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  23) #include <linux/math64.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  24) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  25) /* Not needed on 64bit architectures */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  26) #if BITS_PER_LONG == 32
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  27) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  28) #ifndef __div64_32
/*
 * Generic 64-by-32 bit division: divides *n by base in place and
 * returns the 32-bit remainder.  Weak so an arch-optimized assembly
 * version can override it at link time.
 */
uint32_t __attribute__((weak)) __div64_32(uint64_t *n, uint32_t base)
{
	uint64_t remainder = *n;
	uint64_t divisor = base;
	uint64_t quotient = 0;
	uint64_t bit = 1;
	uint32_t upper = remainder >> 32;

	/* Knock down the top 32 bits first when they already exceed base. */
	if (upper >= base) {
		upper /= base;
		quotient = (uint64_t) upper << 32;
		remainder -= (uint64_t) (upper * base) << 32;
	}

	/* Scale the divisor up to just below the remainder (stop before
	 * the sign bit so the shift never overflows). */
	while ((int64_t)divisor > 0 && divisor < remainder) {
		divisor += divisor;
		bit += bit;
	}

	/* Classic restoring long division, one bit per iteration. */
	do {
		if (remainder >= divisor) {
			remainder -= divisor;
			quotient += bit;
		}
		divisor >>= 1;
		bit >>= 1;
	} while (bit);

	*n = quotient;
	return remainder;
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  61) EXPORT_SYMBOL(__div64_32);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  62) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  63) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  64) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  65)  * div_s64_rem - signed 64bit divide with 64bit divisor and remainder
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  66)  * @dividend:	64bit dividend
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  67)  * @divisor:	64bit divisor
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  68)  * @remainder:  64bit remainder
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  69)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  70) #ifndef div_s64_rem
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  71) s64 div_s64_rem(s64 dividend, s32 divisor, s32 *remainder)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  72) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  73) 	u64 quotient;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  74) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  75) 	if (dividend < 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  76) 		quotient = div_u64_rem(-dividend, abs(divisor), (u32 *)remainder);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  77) 		*remainder = -*remainder;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  78) 		if (divisor > 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  79) 			quotient = -quotient;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  80) 	} else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  81) 		quotient = div_u64_rem(dividend, abs(divisor), (u32 *)remainder);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  82) 		if (divisor < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  83) 			quotient = -quotient;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  84) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  85) 	return quotient;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  86) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  87) EXPORT_SYMBOL(div_s64_rem);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  88) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  89) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  90) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  91)  * div64_u64_rem - unsigned 64bit divide with 64bit divisor and remainder
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  92)  * @dividend:	64bit dividend
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  93)  * @divisor:	64bit divisor
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  94)  * @remainder:  64bit remainder
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  95)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  96)  * This implementation is a comparable to algorithm used by div64_u64.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  97)  * But this operation, which includes math for calculating the remainder,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  98)  * is kept distinct to avoid slowing down the div64_u64 operation on 32bit
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  99)  * systems.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 100)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 101) #ifndef div64_u64_rem
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 102) u64 div64_u64_rem(u64 dividend, u64 divisor, u64 *remainder)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 103) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 104) 	u32 high = divisor >> 32;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 105) 	u64 quot;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 106) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 107) 	if (high == 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 108) 		u32 rem32;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 109) 		quot = div_u64_rem(dividend, divisor, &rem32);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 110) 		*remainder = rem32;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 111) 	} else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 112) 		int n = fls(high);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 113) 		quot = div_u64(dividend >> n, divisor >> n);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 114) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 115) 		if (quot != 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 116) 			quot--;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 117) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 118) 		*remainder = dividend - quot * divisor;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 119) 		if (*remainder >= divisor) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 120) 			quot++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 121) 			*remainder -= divisor;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 122) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 123) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 124) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 125) 	return quot;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 126) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 127) EXPORT_SYMBOL(div64_u64_rem);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 128) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 129) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 130) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 131)  * div64_u64 - unsigned 64bit divide with 64bit divisor
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 132)  * @dividend:	64bit dividend
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 133)  * @divisor:	64bit divisor
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 134)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 135)  * This implementation is a modified version of the algorithm proposed
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 136)  * by the book 'Hacker's Delight'.  The original source and full proof
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 137)  * can be found here and is available for use without restriction.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 138)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 139)  * 'http://www.hackersdelight.org/hdcodetxt/divDouble.c.txt'
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 140)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 141) #ifndef div64_u64
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 142) u64 div64_u64(u64 dividend, u64 divisor)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 143) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 144) 	u32 high = divisor >> 32;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 145) 	u64 quot;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 146) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 147) 	if (high == 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 148) 		quot = div_u64(dividend, divisor);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 149) 	} else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 150) 		int n = fls(high);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 151) 		quot = div_u64(dividend >> n, divisor >> n);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 152) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 153) 		if (quot != 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 154) 			quot--;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 155) 		if ((dividend - quot * divisor) >= divisor)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 156) 			quot++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 157) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 158) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 159) 	return quot;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 160) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 161) EXPORT_SYMBOL(div64_u64);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 162) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 163) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 164) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 165)  * div64_s64 - signed 64bit divide with 64bit divisor
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 166)  * @dividend:	64bit dividend
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 167)  * @divisor:	64bit divisor
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 168)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 169) #ifndef div64_s64
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 170) s64 div64_s64(s64 dividend, s64 divisor)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 171) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 172) 	s64 quot, t;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 173) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 174) 	quot = div64_u64(abs(dividend), abs(divisor));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 175) 	t = (dividend ^ divisor) >> 63;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 176) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 177) 	return (quot ^ t) - t;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 178) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 179) EXPORT_SYMBOL(div64_s64);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 180) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 181) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 182) #endif /* BITS_PER_LONG == 32 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 183) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 184) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 185)  * Iterative div/mod for use when dividend is not expected to be much
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 186)  * bigger than divisor.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 187)  */
/*
 * Out-of-line wrapper around __iter_div_u64_rem() from linux/math64.h,
 * exported so modules can use the iterative divide without inlining it
 * everywhere.  Stores the quotient-adjusted remainder through
 * @remainder and returns the quotient.
 */
u32 iter_div_u64_rem(u64 dividend, u32 divisor, u64 *remainder)
{
	return __iter_div_u64_rem(dividend, divisor, remainder);
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 192) EXPORT_SYMBOL(iter_div_u64_rem);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 193) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 194) #ifndef mul_u64_u64_div_u64
/*
 * mul_u64_u64_div_u64 - compute a * b / c without requiring 128-bit
 * intermediates, guarding against overflow of the a * b product.
 *
 * NOTE(review): when the reduced product still cannot fit in 64 bits,
 * this version deliberately shifts precision out of b and c (see
 * "drop precision" below), so the result may be approximate rather
 * than exactly truncated — later mainline kernels replaced this with
 * an exact 128-bit algorithm.  Also assumes kernel ilog2() semantics
 * for a == 0 or b == 0 make the overflow test fall through safely —
 * TODO confirm against the ilog2() definition in linux/log2.h.
 */
u64 mul_u64_u64_div_u64(u64 a, u64 b, u64 c)
{
	u64 res = 0, div, rem;
	int shift;

	/* can a * b overflow ? */
	if (ilog2(a) + ilog2(b) > 62) {
		/*
		 * (b * a) / c is equal to
		 *
		 *      (b / c) * a +
		 *      (b % c) * a / c
		 *
		 * if nothing overflows. Can the 1st multiplication
		 * overflow? Yes, but we do not care: this can only
		 * happen if the end result can't fit in u64 anyway.
		 *
		 * So the code below does
		 *
		 *      res = (b / c) * a;
		 *      b = b % c;
		 */
		div = div64_u64_rem(b, c, &rem);
		res = div * a;
		b = rem;

		/* If a * (b % c) can still overflow, shift bits out of
		 * both b and c so the final multiply fits in 64 bits. */
		shift = ilog2(a) + ilog2(b) - 62;
		if (shift > 0) {
			/* drop precision */
			b >>= shift;
			c >>= shift;
			/* Divisor shifted to zero: the remaining term is
			 * negligible at this precision; return what we have. */
			if (!c)
				return res;
		}
	}

	return res + div64_u64(a * b, c);
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 233) EXPORT_SYMBOL(mul_u64_u64_div_u64);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 234) #endif