/* SPDX-License-Identifier: GPL-2.0-or-later */
#ifndef _ASM_POWERPC_CHECKSUM_H
#define _ASM_POWERPC_CHECKSUM_H
#ifdef __KERNEL__

#include <linux/bitops.h>
#include <linux/in6.h>

/*
 * Computes the checksum of a memory block at src, length len,
 * while copying the block to dst.  An access fault on src or dst
 * makes it return 0 instead of a partial sum.
 *
 * Like csum_partial, this must be called with even lengths,
 * except for the last fragment.
 */
extern __wsum csum_partial_copy_generic(const void *src, void *dst, int len);

/*
 * Checksum-and-copy variants that cross the user/kernel boundary;
 * both return 0 if the user access faults.
 */
#define _HAVE_ARCH_COPY_AND_CSUM_FROM_USER
extern __wsum csum_and_copy_from_user(const void __user *src, void *dst,
				      int len);
#define HAVE_CSUM_COPY_USER
extern __wsum csum_and_copy_to_user(const void *src, void __user *dst,
				    int len);

#define _HAVE_ARCH_CSUM_AND_COPY
#define csum_partial_copy_nocheck(src, dst, len)   \
	csum_partial_copy_generic((src), (dst), (len))

/*
 * turns a 32-bit partial checksum (e.g. from csum_partial) into a
 * 1's complement 16-bit checksum.
 */
static inline __sum16 csum_fold(__wsum sum)
{
	unsigned int tmp;

	/* swap the two 16-bit halves of sum */
	__asm__("rlwinm %0,%1,16,0,31" : "=r" (tmp) : "r" (sum));
	/*
	 * if there is a carry from adding the two 16-bit halves,
	 * it will carry from the lower half into the upper half,
	 * giving us the correct sum in the upper half.
	 */
	return (__force __sum16)(~((__force u32)sum + tmp) >> 16);
}
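
/*
 * Illustrative equivalent in plain C (a sketch only, assuming 32-bit
 * unsigned arithmetic):
 *
 *	tmp = rol32(sum, 16);		swap the 16-bit halves
 *	return ~(sum + tmp) >> 16;	halves (plus carry) land on top
 *
 * e.g. sum = 0xffff0002: 0xffff + 0x0002 = 0x10001, which end-around
 * folds to 0x0002 and is complemented to the final 0xfffd.
 */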

/*
 * Fold a 64-bit running sum to 32 bits: adding x to itself with its
 * halves swapped leaves high + low, plus any end-around carry, in the
 * upper 32 bits.
 */
static inline u32 from64to32(u64 x)
{
	return (x + ror64(x, 32)) >> 32;
}

static inline __wsum csum_tcpudp_nofold(__be32 saddr, __be32 daddr, __u32 len,
					__u8 proto, __wsum sum)
{
#ifdef __powerpc64__
	u64 s = (__force u32)sum;

	s += (__force u32)saddr;
	s += (__force u32)daddr;
#ifdef __BIG_ENDIAN__
	s += proto + len;
#else
	s += (proto + len) << 8;
#endif
	return (__force __wsum) from64to32(s);
#else
	__asm__("\n\
	addc %0,%0,%1 \n\
	adde %0,%0,%2 \n\
	adde %0,%0,%3 \n\
	addze %0,%0 \n\
	"
	: "=r" (sum)
	: "r" (daddr), "r" (saddr), "r" (proto + len), "0" (sum));
	return sum;
#endif
}

/*
 * computes the checksum of the TCP/UDP pseudo-header;
 * returns a 16-bit checksum, already complemented
 */
static inline __sum16 csum_tcpudp_magic(__be32 saddr, __be32 daddr, __u32 len,
					__u8 proto, __wsum sum)
{
	return csum_fold(csum_tcpudp_nofold(saddr, daddr, len, proto, sum));
}
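
/*
 * Illustrative use (a sketch; udph, udp_len, saddr and daddr are
 * hypothetical locals named only for this example): a UDP checksum is
 * the pseudo-header folded together with the datagram contents:
 *
 *	sum = csum_partial(udph, udp_len, 0);
 *	check = csum_tcpudp_magic(saddr, daddr, udp_len, IPPROTO_UDP, sum);
 */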

#define HAVE_ARCH_CSUM_ADD
static inline __wsum csum_add(__wsum csum, __wsum addend)
{
#ifdef __powerpc64__
	u64 res = (__force u64)csum;
#endif
	if (__builtin_constant_p(csum) && csum == 0)
		return addend;
	if (__builtin_constant_p(addend) && addend == 0)
		return csum;

#ifdef __powerpc64__
	res += (__force u64)addend;
	return (__force __wsum)((u32)res + (res >> 32));
#else
	asm("addc %0,%0,%1;"
	    "addze %0,%0;"
	    : "+r" (csum) : "r" (addend) : "xer");
	return csum;
#endif
}
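
/*
 * One's complement addition keeps the end-around carry, e.g.
 * csum_add(0xffffffff, 0x00000001) wraps to 0x00000000 with a carry,
 * and adding the carry back in gives 0x00000001.
 */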

/*
 * This is a version of ip_compute_csum() optimized for IP headers,
 * which are always checksummed on 4-octet boundaries.  ihl is the
 * number of 32-bit words and is always >= 5.
 */
static inline __wsum ip_fast_csum_nofold(const void *iph, unsigned int ihl)
{
	const u32 *ptr = (const u32 *)iph + 1;
#ifdef __powerpc64__
	unsigned int i;
	u64 s = *(const u32 *)iph;

	for (i = 0; i < ihl - 1; i++, ptr++)
		s += *ptr;
	return (__force __wsum)from64to32(s);
#else
	__wsum sum, tmp;

	asm("mtctr %3;"
	    "addc %0,%4,%5;"
	    "1: lwzu %1, 4(%2);"
	    "adde %0,%0,%1;"
	    "bdnz 1b;"
	    "addze %0,%0;"
	    : "=r" (sum), "=r" (tmp), "+b" (ptr)
	    : "r" (ihl - 2), "r" (*(const u32 *)iph), "r" (*ptr)
	    : "ctr", "xer", "memory");

	return sum;
#endif
}

static inline __sum16 ip_fast_csum(const void *iph, unsigned int ihl)
{
	return csum_fold(ip_fast_csum_nofold(iph, ihl));
}
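
/*
 * Example (a sketch; iph is a hypothetical struct iphdr pointer used
 * only for illustration): a received IPv4 header is intact only if its
 * checksum folds to zero:
 *
 *	if (ip_fast_csum(iph, iph->ihl) != 0)
 *		goto drop;
 */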

/*
 * computes the checksum of a memory block at buff, length len,
 * and adds in "sum" (32-bit)
 *
 * returns a 32-bit number suitable for feeding into itself
 * or csum_tcpudp_magic
 *
 * this function must be called with even lengths, except
 * for the last fragment, which may be odd
 *
 * it's best to have buff aligned on a 32-bit boundary
 */
__wsum __csum_partial(const void *buff, int len, __wsum sum);

static inline __wsum csum_partial(const void *buff, int len, __wsum sum)
{
	if (__builtin_constant_p(len) && len <= 16 && (len & 1) == 0) {
		if (len == 2)
			sum = csum_add(sum, (__force __wsum)*(const u16 *)buff);
		if (len >= 4)
			sum = csum_add(sum, (__force __wsum)*(const u32 *)buff);
		if (len == 6)
			sum = csum_add(sum, (__force __wsum)
					    *(const u16 *)(buff + 4));
		if (len >= 8)
			sum = csum_add(sum, (__force __wsum)
					    *(const u32 *)(buff + 4));
		if (len == 10)
			sum = csum_add(sum, (__force __wsum)
					    *(const u16 *)(buff + 8));
		if (len >= 12)
			sum = csum_add(sum, (__force __wsum)
					    *(const u32 *)(buff + 8));
		if (len == 14)
			sum = csum_add(sum, (__force __wsum)
					    *(const u16 *)(buff + 12));
		if (len >= 16)
			sum = csum_add(sum, (__force __wsum)
					    *(const u32 *)(buff + 12));
	} else if (__builtin_constant_p(len) && (len & 3) == 0) {
		sum = csum_add(sum, ip_fast_csum_nofold(buff, len >> 2));
	} else {
		sum = __csum_partial(buff, len, sum);
	}
	return sum;
}
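
/*
 * With a compile-time constant length the branches above fold away;
 * e.g. (a sketch; iph is a hypothetical struct iphdr pointer)
 *
 *	sum = csum_partial(&iph->saddr, 8, sum);
 *
 * reduces to two 32-bit loads and two csum_add()s.
 */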

/*
 * this routine is used for miscellaneous IP-like checksums, mainly
 * in icmp.c
 */
static inline __sum16 ip_compute_csum(const void *buff, int len)
{
	return csum_fold(csum_partial(buff, len, 0));
}

#define _HAVE_ARCH_IPV6_CSUM
/*
 * computes the checksum of the IPv6 pseudo-header; returns a 16-bit
 * checksum, already complemented.  Implemented in assembly in
 * arch/powerpc/lib/.
 */
__sum16 csum_ipv6_magic(const struct in6_addr *saddr,
			const struct in6_addr *daddr,
			__u32 len, __u8 proto, __wsum sum);

#endif /* __KERNEL__ */
#endif /* _ASM_POWERPC_CHECKSUM_H */