^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1) /* SPDX-License-Identifier: GPL-2.0+
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3) * $Id: checksum.S,v 1.10 2001/07/06 13:11:32 gniibe Exp $
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5) * INET An implementation of the TCP/IP protocol suite for the LINUX
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6) * operating system. INET is implemented using the BSD Socket
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7) * interface as the means of communication with the user level.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9) * IP/TCP/UDP checksumming routines
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11) * Authors: Jorge Cwik, <jorge@laser.satlink.net>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12) * Arnt Gulbrandsen, <agulbra@nvg.unit.no>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13) * Tom May, <ftom@netcom.com>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14) * Pentium Pro/II routines:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15) * Alexander Kjeldaas <astor@guardian.no>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 16) * Finn Arne Gangstad <finnag@guardian.no>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 17) * Lots of code moved from tcp.c and ip.c; see those files
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 18) * for more names.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 19) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 20) * Changes: Ingo Molnar, converted csum_partial_copy() to 2.1 exception
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 21) * handling.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 22) * Andi Kleen, add zeroing on error
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 23) * converted to pure assembler
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 24) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 25) * SuperH version: Copyright (C) 1999 Niibe Yutaka
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 26) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 27)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 28) #include <asm/errno.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 29) #include <linux/linkage.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 30)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 31) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 32) * computes a partial checksum, e.g. for TCP/UDP fragments
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 33) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 34)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 35) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 36) * asmlinkage __wsum csum_partial(const void *buf, int len, __wsum sum);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 37) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 38)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 39) .text
! ----------------------------------------------------------------------
! __wsum csum_partial(const void *buf, int len, __wsum sum)
!
! In:   r4 = buf, r5 = len (bytes), r6 = initial partial sum
! Out:  r0 = 32-bit ones'-complement partial sum
! Uses: r1, r2, r3, r7 (r7 keeps the original buf so the final
!       fix-up at label 9 can detect an odd starting address) and
!       the T bit.
! Note: SH delayed branches (bt/s, bf/s, bra, rts) execute the
!       instruction that follows them in the delay slot.
! ----------------------------------------------------------------------
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 40) ENTRY(csum_partial)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 41) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 42) * Experiments with Ethernet and SLIP connections show that buff
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 43) * is aligned on either a 2-byte or 4-byte boundary. We get at
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 44) * least a twofold speedup on 486 and Pentium if it is 4-byte aligned.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 45) * Fortunately, it is easy to convert 2-byte alignment to 4-byte
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 46) * alignment for the unrolled loop.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 47) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 48) mov r4, r0
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 49) tst #3, r0 ! Check alignment.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 50) bt/s 2f ! Jump if alignment is ok.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 51) mov r4, r7 ! Keep a copy to check for alignment
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 52) !
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 53) tst #1, r0 ! Check alignment.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 54) bt 21f ! Jump if alignment is boundary of 2bytes.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 55)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 56) ! buf is odd
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 57) tst r5, r5
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 58) add #-1, r5
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 59) bt 9f
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 60) mov.b @r4+, r0
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 61) extu.b r0, r0
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 62) addc r0, r6 ! t=0 from previous tst
! Rotate the running sum left by 8 so subsequent 16/32-bit adds line
! up with the odd start; label 9 rotates it back when buf was odd.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 63) mov r6, r0
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 64) shll8 r6
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 65) shlr16 r0
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 66) shlr8 r0
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 67) or r0, r6
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 68) mov r4, r0
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 69) tst #2, r0
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 70) bt 2f
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 71) 21:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 72) ! buf is 2 byte aligned (len could be 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 73) add #-2, r5 ! Alignment uses up two bytes.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 74) cmp/pz r5 !
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 75) bt/s 1f ! Jump if we had at least two bytes.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 76) clrt
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 77) bra 6f
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 78) add #2, r5 ! r5 was < 2. Deal with it.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 79) 1:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 80) mov.w @r4+, r0
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 81) extu.w r0, r0
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 82) addc r0, r6
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 83) bf 2f
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 84) add #1, r6
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 85) 2:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 86) ! buf is 4 byte aligned (len could be 0)
! r1 = len / 32: number of iterations of the unrolled 32-byte loop.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 87) mov r5, r1
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 88) mov #-5, r0
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 89) shld r0, r1
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 90) tst r1, r1
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 91) bt/s 4f ! if it's =0, go to 4f
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 92) clrt
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 93) .align 2
! Main loop: sum 32 bytes (8 longwords) per iteration.  The carry out
! of the addc chain is saved in r0 via movt (dt clobbers the T bit)
! and restored for the next iteration by the cmp/eq #1,r0 sitting in
! the bf/s delay slot.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 94) 3:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 95) mov.l @r4+, r0
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 96) mov.l @r4+, r2
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 97) mov.l @r4+, r3
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 98) addc r0, r6
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 99) mov.l @r4+, r0
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 100) addc r2, r6
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 101) mov.l @r4+, r2
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 102) addc r3, r6
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 103) mov.l @r4+, r3
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 104) addc r0, r6
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 105) mov.l @r4+, r0
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 106) addc r2, r6
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 107) mov.l @r4+, r2
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 108) addc r3, r6
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 109) addc r0, r6
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 110) addc r2, r6
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 111) movt r0
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 112) dt r1
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 113) bf/s 3b
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 114) cmp/eq #1, r0
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 115) ! here, we know r1==0
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 116) addc r1, r6 ! add carry to r6
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 117) 4:
! Remaining whole longwords: (len & 0x1c) / 4 iterations, using the
! same movt/dt/cmp-eq carry save/restore trick as the main loop.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 118) mov r5, r0
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 119) and #0x1c, r0
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 120) tst r0, r0
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 121) bt 6f
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 122) ! 4 bytes or more remaining
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 123) mov r0, r1
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 124) shlr2 r1
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 125) mov #0, r2
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 126) 5:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 127) addc r2, r6
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 128) mov.l @r4+, r2
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 129) movt r0
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 130) dt r1
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 131) bf/s 5b
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 132) cmp/eq #1, r0
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 133) addc r2, r6
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 134) addc r1, r6 ! r1==0 here, so it means add carry-bit
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 135) 6:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 136) ! 3 bytes or less remaining
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 137) mov #3, r0
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 138) and r0, r5
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 139) tst r5, r5
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 140) bt 9f ! if it's =0 go to 9f
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 141) mov #2, r1
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 142) cmp/hs r1, r5
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 143) bf 7f
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 144) mov.w @r4+, r0
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 145) extu.w r0, r0
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 146) cmp/eq r1, r5
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 147) bt/s 8f
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 148) clrt
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 149) shll16 r0
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 150) addc r0, r6
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 151) 7:
! Final odd byte; on big-endian it occupies the high half of the
! 16-bit lane, hence the shll8.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 152) mov.b @r4+, r0
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 153) extu.b r0, r0
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 154) #ifndef __LITTLE_ENDIAN__
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 155) shll8 r0
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 156) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 157) 8:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 158) addc r0, r6
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 159) mov #0, r0
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 160) addc r0, r6
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 161) 9:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 162) ! Check if the buffer was misaligned, if so realign sum
! (undo the rotate-left-by-8 done after the odd leading byte; r7
! still holds the original buf pointer saved at entry)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 163) mov r7, r0
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 164) tst #1, r0
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 165) bt 10f
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 166) mov r6, r0
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 167) shll8 r6
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 168) shlr16 r0
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 169) shlr8 r0
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 170) or r0, r6
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 171) 10:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 172) rts
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 173) mov r6, r0 ! return value in r0 (delay slot)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 174)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 175) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 176) unsigned int csum_partial_copy_generic (const char *src, char *dst, int len)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 177) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 178)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 179) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 180) * Copy from src while checksumming, otherwise like csum_partial with initial
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 181) * sum being ~0U
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 182) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 183)
/*
 * EXC(insn): emit insn and record an exception-table entry mapping a
 * fault at that instruction (label 9999) to the fixup code at local
 * label 6001 (defined in the .fixup section at the end of this file).
 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 184) #define EXC(...) \
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 185) 9999: __VA_ARGS__ ; \
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 186) .section __ex_table, "a"; \
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 187) .long 9999b, 6001f ; \
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 188) .previous
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 189)
! ----------------------------------------------------------------------
! unsigned int csum_partial_copy_generic(const char *src, char *dst,
!                                        int len)
!
! Copies len bytes from src to dst while accumulating the checksum.
! Out:  r0 = checksum of the copied data; the initial sum is ~0U
!       (r7 is preloaded with -1 below).  If a wrapped access faults,
!       the .fixup handler at 6001 returns 0 instead.
! Uses: r0-r3, r7 and the T bit; r2 preserves the original len for
!       the tail processing.
! Every memory access that may fault is wrapped in EXC().
! ----------------------------------------------------------------------
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 190) !
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 191) ! r4: const char *SRC
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 192) ! r5: char *DST
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 193) ! r6: int LEN
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 194) !
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 195) ENTRY(csum_partial_copy_generic)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 196) mov #-1,r7 ! initial sum = ~0U
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 197) mov #3,r0 ! Check src and dest are equally aligned
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 198) mov r4,r1
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 199) and r0,r1
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 200) and r5,r0
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 201) cmp/eq r1,r0
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 202) bf 3f ! Different alignments, use slow version
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 203) tst #1,r0 ! Check dest word aligned
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 204) bf 3f ! If not, do it the slow way
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 205)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 206) mov #2,r0
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 207) tst r0,r5 ! Check dest alignment.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 208) bt 2f ! Jump if alignment is ok.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 209) add #-2,r6 ! Alignment uses up two bytes.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 210) cmp/pz r6 ! Jump if we had at least two bytes.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 211) bt/s 1f
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 212) clrt
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 213) add #2,r6 ! r6 was < 2. Deal with it.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 214) bra 4f
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 215) mov r6,r2 ! save len for tail (delay slot)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 216)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 217) 3: ! Handle different src and dest alignments.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 218) ! This is not common, so simple byte by byte copy will do.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 219) mov r6,r2
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 220) shlr r6 ! r6 = number of byte pairs
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 221) tst r6,r6
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 222) bt 4f
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 223) clrt
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 224) .align 2
! Byte-pair copy/checksum loop: the two bytes are combined into one
! 16-bit value in host byte order (shll8 on the second byte for
! little-endian, on the first for big-endian) before the addc.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 225) 5:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 226) EXC( mov.b @r4+,r1 )
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 227) EXC( mov.b @r4+,r0 )
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 228) extu.b r1,r1
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 229) EXC( mov.b r1,@r5 )
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 230) EXC( mov.b r0,@(1,r5) )
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 231) extu.b r0,r0
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 232) add #2,r5
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 233)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 234) #ifdef __LITTLE_ENDIAN__
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 235) shll8 r0
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 236) #else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 237) shll8 r1
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 238) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 239) or r1,r0
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 240)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 241) addc r0,r7
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 242) movt r0 ! save carry; dt clobbers T
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 243) dt r6
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 244) bf/s 5b
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 245) cmp/eq #1,r0 ! restore carry (delay slot)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 246) mov #0,r0
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 247) addc r0, r7 ! fold in final carry
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 248)
! If the total length was odd there is one trailing byte left over.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 249) mov r2, r0
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 250) tst #1, r0
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 251) bt 7f
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 252) bra 5f
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 253) clrt
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 254)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 255) ! src and dest equally aligned, but to a two byte boundary.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 256) ! Handle first two bytes as a special case
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 257) .align 2
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 258) 1:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 259) EXC( mov.w @r4+,r0 )
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 260) EXC( mov.w r0,@r5 )
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 261) add #2,r5
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 262) extu.w r0,r0
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 263) addc r0,r7
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 264) mov #0,r0
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 265) addc r0,r7
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 266) 2:
! Aligned fast path: r2 preserves len, r6 = len / 32 loop count.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 267) mov r6,r2
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 268) mov #-5,r0
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 269) shld r0,r6
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 270) tst r6,r6
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 271) bt/s 2f
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 272) clrt
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 273) .align 2
! Main loop: copy and checksum 32 bytes (8 longwords) per iteration.
! Carry is saved in r0 via movt across the dt and restored by the
! cmp/eq #1,r0 in the bf/s delay slot.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 274) 1:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 275) EXC( mov.l @r4+,r0 )
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 276) EXC( mov.l @r4+,r1 )
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 277) addc r0,r7
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 278) EXC( mov.l r0,@r5 )
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 279) EXC( mov.l r1,@(4,r5) )
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 280) addc r1,r7
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 281)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 282) EXC( mov.l @r4+,r0 )
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 283) EXC( mov.l @r4+,r1 )
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 284) addc r0,r7
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 285) EXC( mov.l r0,@(8,r5) )
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 286) EXC( mov.l r1,@(12,r5) )
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 287) addc r1,r7
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 288)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 289) EXC( mov.l @r4+,r0 )
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 290) EXC( mov.l @r4+,r1 )
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 291) addc r0,r7
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 292) EXC( mov.l r0,@(16,r5) )
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 293) EXC( mov.l r1,@(20,r5) )
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 294) addc r1,r7
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 295)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 296) EXC( mov.l @r4+,r0 )
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 297) EXC( mov.l @r4+,r1 )
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 298) addc r0,r7
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 299) EXC( mov.l r0,@(24,r5) )
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 300) EXC( mov.l r1,@(28,r5) )
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 301) addc r1,r7
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 302) add #32,r5
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 303) movt r0
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 304) dt r6
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 305) bf/s 1b
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 306) cmp/eq #1,r0
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 307) mov #0,r0
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 308) addc r0,r7 ! fold in final carry
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 309)
! Remaining whole longwords: (len & 0x1c) / 4 of them.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 310) 2: mov r2,r6
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 311) mov #0x1c,r0
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 312) and r0,r6
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 313) cmp/pl r6
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 314) bf/s 4f
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 315) clrt
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 316) shlr2 r6
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 317) 3:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 318) EXC( mov.l @r4+,r0 )
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 319) addc r0,r7
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 320) EXC( mov.l r0,@r5 )
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 321) add #4,r5
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 322) movt r0
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 323) dt r6
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 324) bf/s 3b
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 325) cmp/eq #1,r0
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 326) mov #0,r0
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 327) addc r0,r7
! Tail: last 0-3 bytes (len & 3).
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 328) 4: mov r2,r6
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 329) mov #3,r0
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 330) and r0,r6
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 331) cmp/pl r6
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 332) bf 7f
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 333) mov #2,r1
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 334) cmp/hs r1,r6
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 335) bf 5f
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 336) EXC( mov.w @r4+,r0 )
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 337) EXC( mov.w r0,@r5 )
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 338) extu.w r0,r0
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 339) add #2,r5
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 340) cmp/eq r1,r6
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 341) bt/s 6f
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 342) clrt
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 343) shll16 r0
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 344) addc r0,r7
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 345) 5:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 346) EXC( mov.b @r4+,r0 )
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 347) EXC( mov.b r0,@r5 )
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 348) extu.b r0,r0
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 349) #ifndef __LITTLE_ENDIAN__
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 350) shll8 r0
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 351) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 352) 6: addc r0,r7
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 353) mov #0,r0
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 354) addc r0,r7
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 355) 7:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 356)
! Exception fixup: referenced as 6001f by every EXC() entry above.
! A faulting access lands here and the function returns 0.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 357) # Exception handler:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 358) .section .fixup, "ax"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 359)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 360) 6001:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 361) rts
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 362) mov #0,r0
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 363) .previous
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 364) rts
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 365) mov r7,r0 ! return checksum in r0 (delay slot)