/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _ASM_X86_DIV64_H
#define _ASM_X86_DIV64_H

#ifdef CONFIG_X86_32

#include <linux/types.h>
#include <linux/log2.h>

/*
 * do_div() is NOT a C function. It wants to return
 * two values (the quotient and the remainder), but
 * since that doesn't work very well in C, what it
 * does is:
 *
 * - modifies the 64-bit dividend _in_place_
 * - returns the 32-bit remainder
 *
 * This ends up being the most efficient "calling
 * convention" on x86.
 */
#define do_div(n, base)						\
({								\
	unsigned long __upper, __low, __high, __mod, __base;	\
	__base = (base);					\
	if (__builtin_constant_p(__base) && is_power_of_2(__base)) { \
		__mod = n & (__base - 1);			\
		n >>= ilog2(__base);				\
	} else {						\
		asm("" : "=a" (__low), "=d" (__high) : "A" (n));\
		__upper = __high;				\
		if (__high) {					\
			__upper = __high % (__base);		\
			__high = __high / (__base);		\
		}						\
		asm("divl %2" : "=a" (__low), "=d" (__mod)	\
			: "rm" (__base), "0" (__low), "1" (__upper)); \
		asm("" : "=A" (n) : "a" (__low), "d" (__high));	\
	}							\
	__mod;							\
})
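
/*
 * Usage sketch (illustrative, not part of the original header): the
 * dividend must be an lvalue of a 64-bit type, since do_div() updates
 * it in place and the macro evaluates to the 32-bit remainder:
 *
 *	u64 n = 1000000123;
 *	u32 rem = do_div(n, 1000);
 *
 * afterwards n == 1000000 and rem == 123.
 */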

/*
 * 64-by-32 divide: pre-divide the upper 32 bits so that the divl below
 * always sees upper < divisor, which guarantees its quotient fits in
 * 32 bits and cannot raise #DE from overflow.
 */
static inline u64 div_u64_rem(u64 dividend, u32 divisor, u32 *remainder)
{
	union {
		u64 v64;
		u32 v32[2];
	} d = { dividend };
	u32 upper;

	upper = d.v32[1];
	d.v32[1] = 0;
	if (upper >= divisor) {
		d.v32[1] = upper / divisor;
		upper %= divisor;
	}
	asm ("divl %2" : "=a" (d.v32[0]), "=d" (*remainder) :
		"rm" (divisor), "0" (d.v32[0]), "1" (upper));
	return d.v64;
}
#define div_u64_rem div_u64_rem
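
/*
 * Worked example (illustrative, with made-up values): for
 * dividend == 0x0000000500000003 and divisor == 2, the pre-divide of
 * the upper word gives 5 / 2 == 2 remainder 1, and the divl then
 * computes 0x100000003 / 2 == 0x80000001 remainder 1, so div_u64_rem()
 * returns 0x0000000280000001 with *remainder == 1.
 */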

static inline u64 mul_u32_u32(u32 a, u32 b)
{
	u32 high, low;

	asm ("mull %[b]" : "=a" (low), "=d" (high)
	     : [a] "a" (a), [b] "rm" (b) );

	return low | ((u64)high) << 32;
}
#define mul_u32_u32 mul_u32_u32
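
/*
 * For reference (illustrative, not part of the original header):
 * mul_u32_u32(a, b) computes the same value as (u64)a * b; the asm form
 * pins the operation to a single 32x32->64 mull instead of trusting the
 * compiler to avoid a full 64x64 multiply. For example,
 * mul_u32_u32(0xffffffff, 0xffffffff) == 0xfffffffe00000001.
 */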

#else
# include <asm-generic/div64.h>

/*
 * Will generate an #DE when the result doesn't fit u64, could fix with an
 * __ex_table[] entry when it becomes an issue.
 */
static inline u64 mul_u64_u64_div_u64(u64 a, u64 mul, u64 div)
{
	u64 q;

	asm ("mulq %2; divq %3" : "=a" (q)
	     : "a" (a), "rm" (mul), "rm" (div)
	     : "rdx");

	return q;
}
#define mul_u64_u64_div_u64 mul_u64_u64_div_u64
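
/*
 * Usage sketch (illustrative; cycles and freq_hz are hypothetical
 * variables, NSEC_PER_SEC is the kernel constant from <linux/time64.h>):
 * the full 128-bit intermediate product in rdx:rax means a scaling like
 *
 *	u64 ns = mul_u64_u64_div_u64(cycles, NSEC_PER_SEC, freq_hz);
 *
 * stays exact even when cycles * NSEC_PER_SEC exceeds 2^64, as long as
 * the final quotient fits in 64 bits and freq_hz is non-zero.
 */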

static inline u64 mul_u64_u32_div(u64 a, u32 mul, u32 div)
{
	return mul_u64_u64_div_u64(a, mul, div);
}
#define mul_u64_u32_div mul_u64_u32_div

#endif /* CONFIG_X86_32 */

#endif /* _ASM_X86_DIV64_H */