// SPDX-License-Identifier: GPL-2.0-or-later
/*
 *
 * INET		An implementation of the TCP/IP protocol suite for the LINUX
 *		operating system.  INET is implemented using the  BSD Socket
 *		interface as the means of communication with the user level.
 *
 *		IP/TCP/UDP checksumming routines
 *
 * Authors:	Jorge Cwik, <jorge@laser.satlink.net>
 *		Arnt Gulbrandsen, <agulbra@nvg.unit.no>
 *		Tom May, <ftom@netcom.com>
 *		Andreas Schwab, <schwab@issan.informatik.uni-dortmund.de>
 *		Lots of code moved from tcp.c and ip.c; see those files
 *		for more names.
 *
 * 03/02/96	Jes Sorensen, Andreas Schwab, Roman Hodek:
 *		Fixed some nasty bugs, causing some horrible crashes.
 *		A: At some points, the sum (%0) was used as
 *		length-counter instead of the length counter
 *		(%1). Thanks to Roman Hodek for pointing this out.
 *		B: GCC seems to mess up if one uses too many
 *		data-registers to hold input values and one tries to
 *		specify d0 and d1 as scratch registers. Letting gcc
 *		choose these registers itself solves the problem.
 */

/* Revised by Kenneth Albanowski for m68knommu. Basic problem: unaligned access
 kills, so most of the assembly has to go. */

#include <linux/export.h>
#include <net/checksum.h>

#include <asm/byteorder.h>

#ifndef do_csum
static inline unsigned short from32to16(unsigned int x)
{
	/* fold the two 16-bit halves; the sum fits in 17 bits (16 + carry) */
	x = (x & 0xffff) + (x >> 16);
	/* fold the carry bit back in */
	x = (x & 0xffff) + (x >> 16);
	return x;
}
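
/*
 * Illustrative example (not compiled): from32to16(0xffff0001) first
 * folds to 0x0001 + 0xffff = 0x10000, then the second fold propagates
 * the carry: 0x0000 + 0x0001 = 0x0001.  Two folds always suffice,
 * since the first sum fits in 17 bits.
 */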

static unsigned int do_csum(const unsigned char *buff, int len)
{
	int odd;
	unsigned int result = 0;

	if (len <= 0)
		goto out;
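	/*
	 * Detect an odd start address: the first byte must then be
	 * accumulated in the opposite byte lane, and the two halves of
	 * the result are swapped back before returning (see below).
	 */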
	odd = 1 & (unsigned long) buff;
	if (odd) {
#ifdef __LITTLE_ENDIAN
		result += (*buff << 8);
#else
		result = *buff;
#endif
		len--;
		buff++;
	}
	if (len >= 2) {
		if (2 & (unsigned long) buff) {
			result += *(unsigned short *) buff;
			len -= 2;
			buff += 2;
		}
		if (len >= 4) {
			const unsigned char *end = buff + ((unsigned)len & ~3);
			unsigned int carry = 0;
			do {
				unsigned int w = *(unsigned int *) buff;
				buff += 4;
				result += carry;
				result += w;
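				/*
				 * Detect unsigned wraparound: if the add
				 * overflowed, result is now smaller than the
				 * word just added, so remember the carry and
				 * fold it in on the next pass (end-around
				 * carry).
				 */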
				carry = (w > result);
			} while (buff < end);
			result += carry;
			result = (result & 0xffff) + (result >> 16);
		}
		if (len & 2) {
			result += *(unsigned short *) buff;
			buff += 2;
		}
	}
	if (len & 1)
#ifdef __LITTLE_ENDIAN
		result += *buff;
#else
		result += (*buff << 8);
#endif
	result = from32to16(result);
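	/*
	 * The 16-bit one's-complement sum is invariant under byte
	 * swapping of all its operands, so an odd start address only
	 * leaves the two halves of the result exchanged; swap them back.
	 */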
	if (odd)
		result = ((result >> 8) & 0xff) | ((result & 0xff) << 8);
out:
	return result;
}
#endif

#ifndef ip_fast_csum
/*
 * This is a version of ip_compute_csum() optimized for IP headers,
 * which are always a multiple of 4 octets long and checksummed in
 * whole 4-octet words.
 */
__sum16 ip_fast_csum(const void *iph, unsigned int ihl)
{
	return (__force __sum16)~do_csum(iph, ihl*4);
}
EXPORT_SYMBOL(ip_fast_csum);
#endif
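
/*
 * Example use (sketch): verifying a received IPv4 header.  For a valid
 * header the one's-complement sum over the whole header, checksum
 * field included, folds to zero:
 *
 *	const struct iphdr *iph = ip_hdr(skb);
 *
 *	if (ip_fast_csum(iph, iph->ihl))
 *		goto csum_error;
 *
 * ip_hdr(), skb and the csum_error label are assumed caller context,
 * not part of this file.
 */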

/*
 * computes the checksum of a memory block at buff, length len,
 * and adds in "sum" (32-bit)
 *
 * returns a 32-bit number suitable for feeding into itself
 * or csum_tcpudp_magic
 *
 * this function must be called with even lengths, except
 * for the last fragment, which may be odd
 *
 * it's best to have buff aligned on a 32-bit boundary
 */
__wsum csum_partial(const void *buff, int len, __wsum wsum)
{
	unsigned int sum = (__force unsigned int)wsum;
	unsigned int result = do_csum(buff, len);

	/* add in old sum, and carry.. */
	result += sum;
	if (sum > result)
		result += 1;
	return (__force __wsum)result;
}
EXPORT_SYMBOL(csum_partial);
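
/*
 * Example use (sketch): checksumming a buffer in two pieces gives the
 * same result as a single pass, provided every piece but the last has
 * even length:
 *
 *	__wsum sum = csum_partial(buf, 1024, 0);
 *
 *	sum = csum_partial(buf + 1024, len - 1024, sum);
 *	check = csum_fold(sum);
 *
 * csum_fold() folds the 32-bit accumulator down to the final 16-bit
 * checksum; buf, len and check are assumed caller context.
 */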

/*
 * this routine is used for miscellaneous IP-like checksums, mainly
 * in icmp.c
 */
__sum16 ip_compute_csum(const void *buff, int len)
{
	return (__force __sum16)~do_csum(buff, len);
}
EXPORT_SYMBOL(ip_compute_csum);

#ifndef csum_tcpudp_nofold
static inline u32 from64to32(u64 x)
{
	/* fold the two 32-bit halves; the sum fits in 33 bits (32 + carry) */
	x = (x & 0xffffffff) + (x >> 32);
	/* fold the carry bit back in */
	x = (x & 0xffffffff) + (x >> 32);
	return (u32)x;
}

__wsum csum_tcpudp_nofold(__be32 saddr, __be32 daddr,
			__u32 len, __u8 proto, __wsum sum)
{
	unsigned long long s = (__force u32)sum;

	s += (__force u32)saddr;
	s += (__force u32)daddr;
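	/*
	 * The pseudo-header carries len and proto in network byte order.
	 * Under one's-complement folding, adding (v << 8) to the 32-bit
	 * accumulator is equivalent to adding the byte-swapped 16-bit v,
	 * so on a little-endian host proto + len is shifted left by 8 to
	 * land in the byte lanes a big-endian host would use.
	 */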
#ifdef __BIG_ENDIAN
	s += proto + len;
#else
	s += (proto + len) << 8;
#endif
	return (__force __wsum)from64to32(s);
}
EXPORT_SYMBOL(csum_tcpudp_nofold);
#endif
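
/*
 * Typical completion of the pseudo-header sum (sketch), as
 * csum_tcpudp_magic() does in the generic checksum headers: combine
 * the nofold result with the payload checksum, then fold:
 *
 *	__wsum psum = csum_tcpudp_nofold(saddr, daddr, len, IPPROTO_UDP, 0);
 *
 *	__sum16 check = csum_fold(csum_partial(payload, len, psum));
 *
 * saddr, daddr, payload and len are assumed caller context.
 */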