/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _ASM_HASH_H
#define _ASM_HASH_H

/*
 * HP-PA only implements integer multiply in the FPU. However, for
 * integer multiplies by constant, it has a number of shift-and-add
 * (but no shift-and-subtract, sigh!) instructions that a compiler
 * can synthesize a code sequence with.
 *
 * Unfortunately, GCC isn't very efficient at using them. For example
 * it uses three instructions for "x *= 21" when only two are needed.
 * But we can find a sequence manually.
 */
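
/*
 * For illustration, "x *= 21" can be done in two shift-and-add steps,
 * since 21 = 5*4 + 1, assuming each "(y << k) + z" with k <= 3 maps
 * onto a single shift-and-add instruction:
 *
 *	t = (x << 2) + x;	t = 5*x
 *	x = (t << 2) + x;	x = 4*t + x = 21*x
 */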

#define HAVE_ARCH__HASH_32 1

/*
 * This is a multiply by GOLDEN_RATIO_32 = 0x61C88647 optimized for the
 * PA7100 pairing rules. This is an in-order 2-way superscalar processor.
 * Only one instruction in a pair may be a shift (by more than 3 bits),
 * but other than that, simple ALU ops (including shift-and-add by up
 * to 3 bits) may be paired arbitrarily.
 *
 * PA8xxx processors also dual-issue ALU instructions, although with
 * fewer constraints, so this schedule is good for them, too.
 *
 * This 6-step sequence was found by Yevgen Voronenko's implementation
 * of the Hcub algorithm at http://spiral.ece.cmu.edu/mcm/gen.html.
 */
static inline u32 __attribute_const__ __hash_32(u32 x)
{
	u32 a, b, c;

	/*
	 * Phase 1: Compute a = (x << 19) + x,
	 * b = (x << 9) + a, c = (x << 23) + b.
	 */
	a = x << 19;		/* Two shifts can't be paired */
	b = x << 9;	a += x;
	c = x << 23;	b += a;
	c += b;
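	/*
	 * At this point a = x * 0x80001, b = x * 0x80201 and
	 * c = x * 0x880201; phase 2 combines these so the result
	 * is x * 0x61C88647 = x * GOLDEN_RATIO_32.
	 */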
	/* Phase 2: Return (b<<11) + (c<<6) + (a<<3) - c */
	b <<= 11;
	a += c << 3;	b -= c;
	return (a << 3) + b;
}
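
/*
 * hash_32(val, bits) in <linux/hash.h> builds on this full-width
 * product, keeping only its most-significant "bits" bits.
 */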

#if BITS_PER_LONG == 64

#define HAVE_ARCH_HASH_64 1

/*
 * Finding a good shift-and-add chain for GOLDEN_RATIO_64 is tricky,
 * because available software for the purpose chokes on constants this
 * large. (It's mostly designed for compiling FIR filter coefficients
 * into FPGAs.)
 *
 * However, Jason Thong pointed out a work-around. The Hcub software
 * (http://spiral.ece.cmu.edu/mcm/gen.html) is designed for *multiple*
 * constant multiplication, and is good at finding shift-and-add chains
 * which share common terms.
 *
 * Looking at 0x61C8864680B583EB in binary:
 * 0110000111001000100001100100011010000000101101011000001111101011
 *  \______________/    \__________/       \_______/     \________/
 *   \____________________________/         \____________________/
 * you can see the non-zero bits are divided into several well-separated
 * blocks. Hcub can find algorithms for those terms separately, which
 * can then be shifted and added together.
 *
 * Dividing the input into 2, 3 or 4 blocks, Hcub can find solutions
 * with 10, 9 or 8 adds, respectively, making a total of 11 for the
 * whole number.
 *
 * Using just two large blocks, 0xC3910C8D << 31 in the high bits,
 * and 0xB583EB in the low bits, produces as good an algorithm as any,
 * at the cost of only one more small shift than the alternatives.
 *
 * The high bits are a larger number and more work to compute, as well
 * as needing one extra cycle to shift left 31 bits before the final
 * addition, so they are the critical path for scheduling. The low bits
 * can fit into the scheduling slots left over.
 */

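/*
 * As a quick check of that split:
 *
 *	(0xC3910C8D << 31) + 0xB583EB
 *		= 0x61C8864680000000 + 0xB583EB
 *		= 0x61C8864680B583EB = GOLDEN_RATIO_64
 */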

/*
 * This _ASSIGN(dst, src) macro performs "dst = src", but prevents GCC
 * from inferring anything about the value assigned to "dst".
 *
 * This prevents it from mis-optimizing certain sequences.
 * In particular, gcc is annoyingly eager to combine consecutive shifts.
 * Given "x <<= 19; y += x; z += x << 1;", GCC will turn this into
 * "y += x << 19; z += x << 20;" even though the latter sequence needs
 * an additional instruction and temporary register.
 *
 * Because no actual assembly code is generated, this construct is
 * usefully portable across all GCC platforms, and so can be test-compiled
 * on non-PA systems.
 *
 * In two places, additional unused input dependencies are added. This
 * forces GCC's scheduling so it does not rearrange instructions too much.
 * Because the PA-8xxx is out of order, I'm not sure how much this matters,
 * but why make it more difficult for the processor than necessary?
 */
#define _ASSIGN(dst, src, ...) asm("" : "=r" (dst) : "0" (src), ##__VA_ARGS__)
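
/*
 * For instance, rewriting the example above as
 *
 *	_ASSIGN(x, x << 19); y += x; z += x << 1;
 *
 * hides the shifted value from GCC, so it can no longer fold the
 * shift into its uses as "y += x << 19; z += x << 20;".
 */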

/*
 * Multiply by GOLDEN_RATIO_64 = 0x61C8864680B583EB using a heavily
 * optimized shift-and-add sequence.
 *
 * Without the final shift, the multiply proper is 19 instructions,
 * 10 cycles and uses only 4 temporaries. Whew!
 *
 * You are not expected to understand this.
 */
static __always_inline u32 __attribute_const__
hash_64(u64 a, unsigned int bits)
{
	u64 b, c, d;

	/*
	 * Encourage GCC to move a dynamic shift to %sar early,
	 * thereby freeing up an additional temporary register.
	 */
	if (!__builtin_constant_p(bits))
		asm("" : "=q" (bits) : "0" (64 - bits));
	else
		bits = 64 - bits;

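	/*
	 * One way to follow the sequence below is to track each variable
	 * as a multiple of the original argument. Line by line the pairs
	 * become
	 *
	 *	b = 5,          c = 0x2000
	 *	b = 0x15,       d = 0x20000
	 *	a = 0x17,       c = 0x22000
	 *	d = 0x5C00,     a = 0xB80000
	 *	d = 0xB7A400,   a = 0xB800000
	 *	c = 0x22015,    a = 0xB800015
	 *	d = 0xB583EB,   c = 0x1702203F
	 *	a = 0xC391020D, b = 0x15 << 38
	 *
	 * so after "a <<= 31; b += d; a += b;" the result is
	 * (0xC3910C8D << 31) + 0xB583EB = GOLDEN_RATIO_64 times the
	 * argument (mod 2^64).
	 */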
	_ASSIGN(b, a*5);	c = a << 13;
	b = (b << 2) + a;	_ASSIGN(d, a << 17);
	a = b + (a << 1);	c += d;
	d = a << 10;		_ASSIGN(a, a << 19);
	d = a - d;		_ASSIGN(a, a << 4, "X" (d));
	c += b;			a += b;
	d -= c;			c += a << 1;
	a += c << 3;		_ASSIGN(b, b << (7+31), "X" (c), "X" (d));
	a <<= 31;		b += d;
	a += b;
	return a >> bits;
}
#undef _ASSIGN	/* We're a widely-used header file, so don't litter! */

#endif /* BITS_PER_LONG == 64 */

#endif /* _ASM_HASH_H */