/*
 * include/asm-xtensa/checksum.h
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License. See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 2001 - 2005 Tensilica Inc.
 */

#ifndef _XTENSA_CHECKSUM_H
#define _XTENSA_CHECKSUM_H

#include <linux/in6.h>
#include <linux/uaccess.h>
#include <asm/core.h>

/*
 * computes the checksum of a memory block at buff, length len,
 * and adds in "sum" (32-bit)
 *
 * returns a 32-bit number suitable for feeding into itself
 * or csum_tcpudp_magic
 *
 * this function must be called with even lengths, except
 * for the last fragment, which may be odd
 *
 * it's best to have buff aligned on a 32-bit boundary
 */
asmlinkage __wsum csum_partial(const void *buff, int len, __wsum sum);

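/*
 * Illustrative sketch (the buffer names frag1/frag2 and lengths len1/len2
 * are hypothetical): checksumming data split across two buffers by feeding
 * the intermediate sum back into csum_partial() and folding at the end.
 *
 *	__wsum sum;
 *	__sum16 check;
 *
 *	sum   = csum_partial(frag1, len1, 0);	// len1 must be even
 *	sum   = csum_partial(frag2, len2, sum);	// last fragment may be odd
 *	check = csum_fold(sum);			// fold to the final 16 bits
 */
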
/*
 * the same as csum_partial, but copies from src while it
 * checksums, and handles user-space pointer exceptions correctly, when needed.
 *
 * it is even more important here to align src and dst on a 32-bit (or,
 * even better, a 64-bit) boundary
 */

asmlinkage __wsum csum_partial_copy_generic(const void *src, void *dst, int len);

#define _HAVE_ARCH_CSUM_AND_COPY
/*
 * Note: if you get a NULL pointer exception here, it means someone
 * passed an incorrect kernel address to one of these functions.
 */
static inline
__wsum csum_partial_copy_nocheck(const void *src, void *dst, int len)
{
	return csum_partial_copy_generic(src, dst, len);
}

#define _HAVE_ARCH_COPY_AND_CSUM_FROM_USER
static inline
__wsum csum_and_copy_from_user(const void __user *src, void *dst,
			       int len)
{
	if (!access_ok(src, len))
		return 0;
	return csum_partial_copy_generic((__force const void *)src, dst, len);
}

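/*
 * Illustrative sketch (ubuf, kbuf and len are hypothetical names): copying
 * a datagram payload in from user space while checksumming it in the same
 * pass.  A return value of 0 is conventionally taken to mean that the user
 * access could not be performed (access_ok() failed or the copy faulted).
 *
 *	__wsum sum = csum_and_copy_from_user(ubuf, kbuf, len);
 *
 *	if (!sum)
 *		return -EFAULT;
 */
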
/*
 * Fold a partial checksum
 */

static __inline__ __sum16 csum_fold(__wsum sum)
{
	unsigned int __dummy;

	/*
	 * Add the upper and lower half-words of sum, fold the carry of that
	 * addition back in, and return the ones' complement of the 16-bit
	 * result (-x - 1 == ~x).
	 */
	__asm__("extui %1, %0, 16, 16\n\t"
		"extui %0, %0, 0, 16\n\t"
		"add %0, %0, %1\n\t"
		"slli %1, %0, 16\n\t"
		"add %0, %0, %1\n\t"
		"extui %0, %0, 16, 16\n\t"
		"neg %0, %0\n\t"
		"addi %0, %0, -1\n\t"
		"extui %0, %0, 0, 16\n\t"
		: "=r" (sum), "=&r" (__dummy)
		: "0" (sum));
	return (__force __sum16)sum;
}

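/*
 * Worked example of the fold above: for sum == 0x0001f00e the two
 * half-words are 0x0001 and 0xf00e, their sum is 0xf00f, there is no
 * carry to fold back in, and the ones' complement gives the final
 * 16-bit checksum 0x0ff0.
 */
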
/*
 * This is a version of ip_compute_csum() optimized for IP headers,
 * which are always checksummed on 4-octet boundaries.
 */
static __inline__ __sum16 ip_fast_csum(const void *iph, unsigned int ihl)
{
	unsigned int sum, tmp, endaddr;

	__asm__ __volatile__(
		"sub %0, %0, %0\n\t"	/* sum = 0 */
#if XCHAL_HAVE_LOOPS
		"loopgtz %2, 2f\n\t"	/* hardware loop executed ihl times */
#else
		"beqz %2, 2f\n\t"
		"slli %4, %2, 2\n\t"	/* endaddr = iph + ihl * 4 */
		"add %4, %4, %1\n\t"
		"0:\t"
#endif
		"l32i %3, %1, 0\n\t"	/* load the next 32-bit word */
		"add %0, %0, %3\n\t"	/* add it to the running sum ... */
		"bgeu %0, %3, 1f\n\t"
		"addi %0, %0, 1\n\t"	/* ... with end-around carry */
		"1:\t"
		"addi %1, %1, 4\n\t"
#if !XCHAL_HAVE_LOOPS
		"blt %1, %4, 0b\n\t"
#endif
		"2:\t"
	/* Since the input registers which are loaded with iph and ihl
	   are modified, we must also specify them as outputs, or gcc
	   will assume they contain their original values. */
		: "=r" (sum), "=r" (iph), "=r" (ihl), "=&r" (tmp),
		  "=&r" (endaddr)
		: "1" (iph), "2" (ihl)
		: "memory");

	return csum_fold(sum);
}

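/*
 * Illustrative sketch: validating the checksum of a received IPv4 header
 * (iph is assumed to point to a struct iphdr).  Because the header's own
 * checksum field is included in the sum, the result is 0 for an intact
 * header.
 *
 *	if (ip_fast_csum(iph, iph->ihl))
 *		goto csum_error;	// corrupted header
 */
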
static __inline__ __wsum csum_tcpudp_nofold(__be32 saddr, __be32 daddr,
					    __u32 len, __u8 proto,
					    __wsum sum)
{
#ifdef __XTENSA_EL__
	unsigned long len_proto = (len + proto) << 8;
#elif defined(__XTENSA_EB__)
	unsigned long len_proto = len + proto;
#else
# error processor byte order undefined!
#endif
	/* Accumulate len/proto, daddr and saddr with end-around carry. */
	__asm__("add %0, %0, %1\n\t"
		"bgeu %0, %1, 1f\n\t"
		"addi %0, %0, 1\n\t"
		"1:\t"
		"add %0, %0, %2\n\t"
		"bgeu %0, %2, 1f\n\t"
		"addi %0, %0, 1\n\t"
		"1:\t"
		"add %0, %0, %3\n\t"
		"bgeu %0, %3, 1f\n\t"
		"addi %0, %0, 1\n\t"
		"1:\t"
		: "=r" (sum), "=r" (len_proto)
		: "r" (daddr), "r" (saddr), "1" (len_proto), "0" (sum));
	return sum;
}

/*
 * computes the checksum of the TCP/UDP pseudo-header;
 * returns a 16-bit checksum, already complemented
 */
static __inline__ __sum16 csum_tcpudp_magic(__be32 saddr, __be32 daddr,
					    __u32 len, __u8 proto,
					    __wsum sum)
{
	return csum_fold(csum_tcpudp_nofold(saddr, daddr, len, proto, sum));
}

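/*
 * Illustrative sketch (uh, saddr, daddr and payload_len are hypothetical
 * names, and the UDP header plus payload are assumed to be contiguous):
 * computing a complete UDP checksum by summing header and payload with
 * csum_partial() and then adding the pseudo-header.
 *
 *	__wsum csum;
 *
 *	csum = csum_partial(uh, sizeof(struct udphdr) + payload_len, 0);
 *	uh->check = csum_tcpudp_magic(saddr, daddr,
 *				      sizeof(struct udphdr) + payload_len,
 *				      IPPROTO_UDP, csum);
 *	if (!uh->check)
 *		uh->check = CSUM_MANGLED_0;	// 0 means "no checksum" for UDP
 */
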
/*
 * this routine is used for miscellaneous IP-like checksums, mainly
 * in icmp.c
 */

static __inline__ __sum16 ip_compute_csum(const void *buff, int len)
{
	return csum_fold(csum_partial(buff, len, 0));
}

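/*
 * Illustrative sketch (icmph and len are hypothetical names): filling in
 * the checksum of an ICMP message that is contiguous in memory.  The
 * checksum field must be zero while the sum is computed.
 *
 *	icmph->checksum = 0;
 *	icmph->checksum = ip_compute_csum(icmph, len);
 */
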
#define _HAVE_ARCH_IPV6_CSUM
static __inline__ __sum16 csum_ipv6_magic(const struct in6_addr *saddr,
					  const struct in6_addr *daddr,
					  __u32 len, __u8 proto,
					  __wsum sum)
{
	unsigned int __dummy;

	/*
	 * Add the four 32-bit words of each address, then len and proto
	 * (both in network byte order), folding the carry back in after
	 * every addition.
	 */
	__asm__("l32i %1, %2, 0\n\t"
		"add %0, %0, %1\n\t"
		"bgeu %0, %1, 1f\n\t"
		"addi %0, %0, 1\n\t"
		"1:\t"
		"l32i %1, %2, 4\n\t"
		"add %0, %0, %1\n\t"
		"bgeu %0, %1, 1f\n\t"
		"addi %0, %0, 1\n\t"
		"1:\t"
		"l32i %1, %2, 8\n\t"
		"add %0, %0, %1\n\t"
		"bgeu %0, %1, 1f\n\t"
		"addi %0, %0, 1\n\t"
		"1:\t"
		"l32i %1, %2, 12\n\t"
		"add %0, %0, %1\n\t"
		"bgeu %0, %1, 1f\n\t"
		"addi %0, %0, 1\n\t"
		"1:\t"
		"l32i %1, %3, 0\n\t"
		"add %0, %0, %1\n\t"
		"bgeu %0, %1, 1f\n\t"
		"addi %0, %0, 1\n\t"
		"1:\t"
		"l32i %1, %3, 4\n\t"
		"add %0, %0, %1\n\t"
		"bgeu %0, %1, 1f\n\t"
		"addi %0, %0, 1\n\t"
		"1:\t"
		"l32i %1, %3, 8\n\t"
		"add %0, %0, %1\n\t"
		"bgeu %0, %1, 1f\n\t"
		"addi %0, %0, 1\n\t"
		"1:\t"
		"l32i %1, %3, 12\n\t"
		"add %0, %0, %1\n\t"
		"bgeu %0, %1, 1f\n\t"
		"addi %0, %0, 1\n\t"
		"1:\t"
		"add %0, %0, %4\n\t"
		"bgeu %0, %4, 1f\n\t"
		"addi %0, %0, 1\n\t"
		"1:\t"
		"add %0, %0, %5\n\t"
		"bgeu %0, %5, 1f\n\t"
		"addi %0, %0, 1\n\t"
		"1:\t"
		: "=r" (sum), "=&r" (__dummy)
		: "r" (saddr), "r" (daddr),
		  "r" (htonl(len)), "r" (htonl(proto)), "0" (sum)
		: "memory");

	return csum_fold(sum);
}

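/*
 * Illustrative sketch (hdr, msg and msg_len are hypothetical names): an
 * ICMPv6 checksum covers the message plus the IPv6 pseudo-header.
 *
 *	msg->icmp6_cksum = 0;
 *	msg->icmp6_cksum = csum_ipv6_magic(&hdr->saddr, &hdr->daddr, msg_len,
 *					   IPPROTO_ICMPV6,
 *					   csum_partial(msg, msg_len, 0));
 */
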
/*
 * Copy and checksum to user
 */
#define HAVE_CSUM_COPY_USER
static __inline__ __wsum csum_and_copy_to_user(const void *src,
					       void __user *dst, int len)
{
	if (!access_ok(dst, len))
		return 0;
	return csum_partial_copy_generic(src, (__force void *)dst, len);
}
#endif /* _XTENSA_CHECKSUM_H */