// SPDX-License-Identifier: GPL-2.0-or-later
/*
 *	IPV4 GSO/GRO offload support
 *	Linux INET implementation
 *
 *	TCPv4 GSO/GRO support
 */

#include <linux/indirect_call_wrapper.h>
#include <linux/skbuff.h>
#include <net/tcp.h>
#include <net/protocol.h>

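/* Walk the freshly built segment list and hand the software TX timestamp
 * request of the original GSO skb over to the segment whose sequence range
 * covers ts_seq, so the timestamp fires for the intended byte.
 */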
static void tcp_gso_tstamp(struct sk_buff *skb, unsigned int ts_seq,
			   unsigned int seq, unsigned int mss)
{
	while (skb) {
		if (before(ts_seq, seq + mss)) {
			skb_shinfo(skb)->tx_flags |= SKBTX_SW_TSTAMP;
			skb_shinfo(skb)->tskey = ts_seq;
			return;
		}

		skb = skb->next;
		seq += mss;
	}
}

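/* IPv4-specific GSO entry point: validate the gso_type and TCP header,
 * make sure the pseudo header checksum is primed, then hand off to the
 * protocol-independent tcp_gso_segment().
 */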
static struct sk_buff *tcp4_gso_segment(struct sk_buff *skb,
					netdev_features_t features)
{
	if (!(skb_shinfo(skb)->gso_type & SKB_GSO_TCPV4))
		return ERR_PTR(-EINVAL);

	if (!pskb_may_pull(skb, sizeof(struct tcphdr)))
		return ERR_PTR(-EINVAL);

	if (unlikely(skb->ip_summed != CHECKSUM_PARTIAL)) {
		const struct iphdr *iph = ip_hdr(skb);
		struct tcphdr *th = tcp_hdr(skb);

		/* Set up the checksum pseudo header; the stack is usually
		 * expected to have done this already.
		 */

		th->check = 0;
		skb->ip_summed = CHECKSUM_PARTIAL;
		__tcp_v4_send_check(skb, iph->saddr, iph->daddr);
	}

	return tcp_gso_segment(skb, features);
}

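/* Core TCP GSO: split one oversized skb into mss-sized segments, fixing up
 * sequence numbers, FIN/PSH/CWR flags and checksums on each segment, and
 * preserving the TCP Small Queues accounting of the original skb.
 */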
struct sk_buff *tcp_gso_segment(struct sk_buff *skb,
				netdev_features_t features)
{
	struct sk_buff *segs = ERR_PTR(-EINVAL);
	unsigned int sum_truesize = 0;
	struct tcphdr *th;
	unsigned int thlen;
	unsigned int seq;
	__be32 delta;
	unsigned int oldlen;
	unsigned int mss;
	struct sk_buff *gso_skb = skb;
	__sum16 newcheck;
	bool ooo_okay, copy_destructor;

	th = tcp_hdr(skb);
	thlen = th->doff * 4;
	if (thlen < sizeof(*th))
		goto out;

	if (!pskb_may_pull(skb, thlen))
		goto out;

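	/* Remember the one's complement of the original TCP length so the
	 * per-segment checksums can be patched incrementally: adding
	 * ~old_len + new_len to a checksum swaps the old length for the new
	 * one in the pseudo header without a full recomputation.
	 */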
	oldlen = (u16)~skb->len;
	__skb_pull(skb, thlen);

	mss = skb_shinfo(skb)->gso_size;
	if (unlikely(skb->len <= mss))
		goto out;

	if (skb_gso_ok(skb, features | NETIF_F_GSO_ROBUST)) {
		/* Packet is from an untrusted source, reset gso_segs. */

		skb_shinfo(skb)->gso_segs = DIV_ROUND_UP(skb->len, mss);

		segs = NULL;
		goto out;
	}

	copy_destructor = gso_skb->destructor == tcp_wfree;
	ooo_okay = gso_skb->ooo_okay;
	/* All segments but the first should have ooo_okay cleared */
	skb->ooo_okay = 0;

	segs = skb_segment(skb, features);
	if (IS_ERR(segs))
		goto out;

	/* Only first segment might have ooo_okay set */
	segs->ooo_okay = ooo_okay;

	/* GSO partial and frag_list segmentation only require splitting
	 * the frame into an MSS multiple and possibly a remainder; both
	 * cases return a GSO skb, so update the mss now.
	 */
	if (skb_is_gso(segs))
		mss *= skb_shinfo(segs)->gso_segs;

	delta = htonl(oldlen + (thlen + mss));

	skb = segs;
	th = tcp_hdr(skb);
	seq = ntohl(th->seq);

	if (unlikely(skb_shinfo(gso_skb)->tx_flags & SKBTX_SW_TSTAMP))
		tcp_gso_tstamp(segs, skb_shinfo(gso_skb)->tskey, seq, mss);

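	/* All segments except the last carry exactly thlen + mss bytes, so
	 * their checksum adjustment is identical and can be computed once
	 * here; the shorter final segment is patched separately below.
	 */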
	newcheck = ~csum_fold((__force __wsum)((__force u32)th->check +
					       (__force u32)delta));

	while (skb->next) {
		th->fin = th->psh = 0;
		th->check = newcheck;

		if (skb->ip_summed == CHECKSUM_PARTIAL)
			gso_reset_checksum(skb, ~th->check);
		else
			th->check = gso_make_checksum(skb, ~th->check);

		seq += mss;
		if (copy_destructor) {
			skb->destructor = gso_skb->destructor;
			skb->sk = gso_skb->sk;
			sum_truesize += skb->truesize;
		}
		skb = skb->next;
		th = tcp_hdr(skb);

		th->seq = htonl(seq);
		th->cwr = 0;
	}

	/* The following permits TCP Small Queues to work well with GSO:
	 * the callback to the TCP stack will run when the last frag is
	 * freed at TX completion, not right now when gso_skb is freed by
	 * the GSO engine.
	 */
	if (copy_destructor) {
		int delta;

		swap(gso_skb->sk, skb->sk);
		swap(gso_skb->destructor, skb->destructor);
		sum_truesize += skb->truesize;
		delta = sum_truesize - gso_skb->truesize;
		/* In some pathological cases, delta can be negative, so we
		 * need to use either refcount_add() or refcount_sub_and_test().
		 */
		if (likely(delta >= 0))
			refcount_add(delta, &skb->sk->sk_wmem_alloc);
		else
			WARN_ON_ONCE(refcount_sub_and_test(-delta, &skb->sk->sk_wmem_alloc));
	}

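	/* The trailing segment is usually shorter than thlen + mss, so its
	 * checksum is adjusted with its real length: the linear data from
	 * the transport header to the tail plus any paged data.
	 */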
	delta = htonl(oldlen + (skb_tail_pointer(skb) -
				skb_transport_header(skb)) +
		      skb->data_len);
	th->check = ~csum_fold((__force __wsum)((__force u32)th->check +
						(__force u32)delta));
	if (skb->ip_summed == CHECKSUM_PARTIAL)
		gso_reset_checksum(skb, ~th->check);
	else
		th->check = gso_make_checksum(skb, ~th->check);
out:
	return segs;
}

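/* Address-family independent GRO receive path, shared by the IPv4 and IPv6
 * hooks: look for a held packet of the same flow, verify that flags, options
 * and sequence numbers allow coalescing, and either merge this segment into
 * it or flag a flush.
 */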
struct sk_buff *tcp_gro_receive(struct list_head *head, struct sk_buff *skb)
{
	struct sk_buff *pp = NULL;
	struct sk_buff *p;
	struct tcphdr *th;
	struct tcphdr *th2;
	unsigned int len;
	unsigned int thlen;
	__be32 flags;
	unsigned int mss = 1;
	unsigned int hlen;
	unsigned int off;
	int flush = 1;
	int i;

	off = skb_gro_offset(skb);
	hlen = off + sizeof(*th);
	th = skb_gro_header_fast(skb, off);
	if (skb_gro_header_hard(skb, hlen)) {
		th = skb_gro_header_slow(skb, hlen, off);
		if (unlikely(!th))
			goto out;
	}

	thlen = th->doff * 4;
	if (thlen < sizeof(*th))
		goto out;

	hlen = off + thlen;
	if (skb_gro_header_hard(skb, hlen)) {
		th = skb_gro_header_slow(skb, hlen, off);
		if (unlikely(!th))
			goto out;
	}

	skb_gro_pull(skb, thlen);

	len = skb_gro_len(skb);
	flags = tcp_flag_word(th);

	list_for_each_entry(p, head, list) {
		if (!NAPI_GRO_CB(p)->same_flow)
			continue;

		th2 = tcp_hdr(p);

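		/* Source and destination ports are adjacent 16-bit fields,
		 * so a single 32-bit comparison covers both at once.
		 */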
		if (*(u32 *)&th->source ^ *(u32 *)&th2->source) {
			NAPI_GRO_CB(p)->same_flow = 0;
			continue;
		}

		goto found;
	}
	p = NULL;
	goto out_check_final;

found:
	/* Include the IP ID check below from the innermost IP hdr */
	flush = NAPI_GRO_CB(p)->flush;
	flush |= (__force int)(flags & TCP_FLAG_CWR);
	flush |= (__force int)((flags ^ tcp_flag_word(th2)) &
		  ~(TCP_FLAG_CWR | TCP_FLAG_FIN | TCP_FLAG_PSH));
	flush |= (__force int)(th->ack_seq ^ th2->ack_seq);
	for (i = sizeof(*th); i < thlen; i += 4)
		flush |= *(u32 *)((u8 *)th + i) ^
			 *(u32 *)((u8 *)th2 + i);

	/* When we receive our second frame we can make a decision on whether
	 * we continue this flow as an atomic flow with a fixed ID or whether
	 * we use an incrementing ID.
	 */
	if (NAPI_GRO_CB(p)->flush_id != 1 ||
	    NAPI_GRO_CB(p)->count != 1 ||
	    !NAPI_GRO_CB(p)->is_atomic)
		flush |= NAPI_GRO_CB(p)->flush_id;
	else
		NAPI_GRO_CB(p)->is_atomic = false;

	mss = skb_shinfo(p)->gso_size;

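	/* A segment larger than the held packet's MSS, or one that does not
	 * directly continue its sequence space, cannot be coalesced.
	 */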
	flush |= (len - 1) >= mss;
	flush |= (ntohl(th2->seq) + skb_gro_len(p)) ^ ntohl(th->seq);
#ifdef CONFIG_TLS_DEVICE
	flush |= p->decrypted ^ skb->decrypted;
#endif

	if (flush || skb_gro_receive(p, skb)) {
		mss = 1;
		goto out_check_final;
	}

	tcp_flag_word(th2) |= flags & (TCP_FLAG_FIN | TCP_FLAG_PSH);

out_check_final:
	flush = len < mss;
	flush |= (__force int)(flags & (TCP_FLAG_URG | TCP_FLAG_PSH |
					TCP_FLAG_RST | TCP_FLAG_SYN |
					TCP_FLAG_FIN));

	if (p && (!NAPI_GRO_CB(skb)->same_flow || flush))
		pp = p;

out:
	NAPI_GRO_CB(skb)->flush |= (flush != 0);

	return pp;
}

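/* Finalize a packet coalesced by GRO: prime it for checksum offload, restore
 * gso_segs from the number of merged segments, and propagate ECN and
 * encapsulation state.  Shared by the address-family specific completion hooks.
 */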
int tcp_gro_complete(struct sk_buff *skb)
{
	struct tcphdr *th = tcp_hdr(skb);

	skb->csum_start = (unsigned char *)th - skb->head;
	skb->csum_offset = offsetof(struct tcphdr, check);
	skb->ip_summed = CHECKSUM_PARTIAL;

	skb_shinfo(skb)->gso_segs = NAPI_GRO_CB(skb)->count;

	if (th->cwr)
		skb_shinfo(skb)->gso_type |= SKB_GSO_TCP_ECN;

	if (skb->encapsulation)
		skb->inner_transport_header = skb->transport_header;

	return 0;
}
EXPORT_SYMBOL(tcp_gro_complete);

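/* IPv4 GRO receive hook: validate the TCP checksum against the IPv4 pseudo
 * header unless the packet is already marked for flushing, then defer to the
 * shared tcp_gro_receive().
 */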
INDIRECT_CALLABLE_SCOPE
struct sk_buff *tcp4_gro_receive(struct list_head *head, struct sk_buff *skb)
{
	/* Don't bother verifying checksum if we're going to flush anyway. */
	if (!NAPI_GRO_CB(skb)->flush &&
	    skb_gro_checksum_validate(skb, IPPROTO_TCP,
				      inet_gro_compute_pseudo)) {
		NAPI_GRO_CB(skb)->flush = 1;
		return NULL;
	}

	return tcp_gro_receive(head, skb);
}

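/* IPv4 GRO completion hook: seed the TCP pseudo header checksum for the
 * merged length and tag the skb as TCPv4 GSO (with a fixed IP ID if the flow
 * stayed atomic) before calling the common tcp_gro_complete().
 */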
INDIRECT_CALLABLE_SCOPE int tcp4_gro_complete(struct sk_buff *skb, int thoff)
{
	const struct iphdr *iph = ip_hdr(skb);
	struct tcphdr *th = tcp_hdr(skb);

	th->check = ~tcp_v4_check(skb->len - thoff, iph->saddr,
				  iph->daddr, 0);
	skb_shinfo(skb)->gso_type |= SKB_GSO_TCPV4;

	if (NAPI_GRO_CB(skb)->is_atomic)
		skb_shinfo(skb)->gso_type |= SKB_GSO_TCP_FIXEDID;

	return tcp_gro_complete(skb);
}

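/* Offload callbacks registered with the inet layer for IPPROTO_TCP. */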
static const struct net_offload tcpv4_offload = {
	.callbacks = {
		.gso_segment	=	tcp4_gso_segment,
		.gro_receive	=	tcp4_gro_receive,
		.gro_complete	=	tcp4_gro_complete,
	},
};

int __init tcpv4_offload_init(void)
{
	return inet_add_offload(&tcpv4_offload, IPPROTO_TCP);
}