^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1) /* SPDX-License-Identifier: GPL-2.0-or-later */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3) * Divide a 64-bit unsigned number by a 32-bit unsigned number.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4) * This routine assumes that the top 32 bits of the dividend are
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5) * non-zero to start with.
 * On entry, r3 points to the dividend, which gets overwritten with
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7) * the 64-bit quotient, and r4 contains the divisor.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8) * On exit, r3 contains the remainder.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10) * Copyright (C) 2002 Paul Mackerras, IBM Corp.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12) #include "ppc_asm.h"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13)
/*
 * __div64_32 - divide a 64-bit unsigned value by a 32-bit unsigned value.
 *
 * In:   r3 = pointer to the 64-bit dividend (word at 0(r3) is the high
 *            half, word at 4(r3) is the low half)
 *       r4 = 32-bit divisor (assumed non-zero)
 * Out:  the 64-bit quotient is stored back through r3,
 *       r3 = the 32-bit remainder
 * Uses: r0, r5-r11, cr0, carry
 *
 * Invariant at label 1: the value still to be divided is r5:r6 and the
 * quotient accumulated so far is r7:r8.
 */
	.globl	__div64_32
__div64_32:
	lwz	r5,0(r3)	# get the dividend into r5/r6
	lwz	r6,4(r3)	# (r5 = high word, r6 = low word)
	cmplw	r5,r4		# dividend.hi < divisor?
	li	r7,0		# quotient.hi = 0
	li	r8,0		# quotient.lo = 0
	blt	1f		# skip the 32-bit step if hi < divisor
	divwu	r7,r5,r4	# if dividend.hi >= divisor,
	mullw	r0,r7,r4	# quotient.hi = dividend.hi / divisor
	subf.	r5,r0,r5	# dividend.hi %= divisor
	beq	3f		# hi now zero: finish with one 32-bit divide
1:	mr	r11,r5		# here dividend.hi != 0
	andis.	r0,r5,0xc000	# if the top two bits of hi are clear,
	bne	2f		# refine the estimate, else use hi as-is
	cntlzw	r0,r5		# we are shifting the dividend right
	li	r10,-1		# to make it < 2^32, and shifting
	srw	r10,r10,r0	# the divisor right the same amount,
	addc	r9,r4,r10	# rounding up (so the estimate cannot
	andc	r11,r6,r10	# ever be too large, only too small)
	andc	r9,r9,r10
	addze	r9,r9		# r9 = shifted divisor, rounded up
	or	r11,r5,r11	# combine hi word with top bits of lo
	rotlw	r9,r9,r0	# rotate so the top 32 bits of the shifted
	rotlw	r11,r11,r0	# dividend end up in r11
	divwu	r11,r11,r9	# then we divide the shifted quantities
2:	mullw	r10,r11,r4	# to get an estimate of the quotient,
	mulhwu	r9,r11,r4	# multiply the estimate by the divisor and
	subfc	r6,r10,r6	# subtract the 64-bit product from the
	add	r8,r8,r11	# dividend, adding the estimate to the
	subfe.	r5,r9,r5	# accumulated quotient
	bne	1b		# loop while dividend.hi is still non-zero
3:	cmplw	r6,r4		# is the remaining value already < divisor?
	blt	4f
	divwu	r0,r6,r4	# perform the remaining 32-bit division
	mullw	r10,r0,r4	# and get the remainder
	add	r8,r8,r0
	subf	r6,r10,r6
4:	stw	r7,0(r3)	# return the quotient in *r3
	stw	r8,4(r3)
	mr	r3,r6		# return the remainder in r3
	blr
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 56)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 57) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 58) * Extended precision shifts.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 59) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 60) * Updated to be valid for shift counts from 0 to 63 inclusive.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 61) * -- Gabriel
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 62) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 63) * R3/R4 has 64 bit value
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 64) * R5 has shift count
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 65) * result in R3/R4
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 66) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 67) * ashrdi3: arithmetic right shift (sign propagation)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 68) * lshrdi3: logical right shift
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 69) * ashldi3: left shift
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 70) */
.globl	__ashrdi3
__ashrdi3:
	# 64-bit arithmetic right shift: {r3:r4} >>= r5, sign-propagating,
	# valid for shift counts 0..63.  Only the low 6 bits of each shift
	# amount register matter to srw/slw/sraw.
	subfic	r6,r5,32	# r6 = 32 - count
	addi	r7,r5,32	# r7 = count - 32 (mod 64, as seen by sraw)
	slw	r6,r3,r6	# bits of MSW that cross into LSW (0 if count > 31)
	rlwinm	r8,r7,0,32	# r8 = (count < 32) ? 32 : 0
	sraw	r7,r3,r7	# MSW >> (count - 32), sign-filled
	slw	r7,r7,r8	# zero that term when count < 32
	srw	r4,r4,r5	# LSW >>= count (0 when count > 31)
	or	r4,r4,r6	# fold in bits shifted down from MSW
	or	r4,r4,r7	# fold in the count >= 32 contribution
	sraw	r3,r3,r5	# MSW >>= count, arithmetically
	blr
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 84)
.globl	__ashldi3
__ashldi3:
	# 64-bit left shift: {r3:r4} <<= r5, valid for shift counts 0..63.
	subfic	r6,r5,32	# r6 = 32 - count
	addi	r7,r5,32	# r7 = count - 32 (mod 64, as seen by slw)
	srw	r6,r4,r6	# bits of LSW that cross into MSW (0 if count > 31)
	slw	r7,r4,r7	# LSW << (count - 32); 0 when count < 32
	slw	r3,r3,r5	# MSW <<= count (0 when count > 31)
	or	r3,r3,r6	# fold in carry-over bits from LSW
	or	r3,r3,r7	# fold in the count >= 32 contribution
	slw	r4,r4,r5	# LSW <<= count (0 when count > 31)
	blr
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 96)
.globl	__lshrdi3
__lshrdi3:
	# 64-bit logical right shift: {r3:r4} >>= r5, zero-filling,
	# valid for shift counts 0..63.
	subfic	r6,r5,32	# r6 = 32 - count
	addi	r7,r5,32	# r7 = count - 32 (mod 64, as seen by srw)
	slw	r6,r3,r6	# bits of MSW that cross into LSW (0 if count > 31)
	srw	r7,r3,r7	# MSW >> (count - 32); 0 when count < 32
	srw	r4,r4,r5	# LSW >>= count (0 when count > 31)
	or	r4,r4,r6	# fold in bits shifted down from MSW
	or	r4,r4,r7	# fold in the count >= 32 contribution
	srw	r3,r3,r5	# MSW >>= count, zero-filling
	blr