// SPDX-License-Identifier: GPL-2.0-or-later
/*
 *	IPV6 GSO/GRO offload support
 *	Linux INET6 implementation
 */

#include <linux/kernel.h>
#include <linux/socket.h>
#include <linux/netdevice.h>
#include <linux/skbuff.h>
#include <linux/printk.h>

#include <net/protocol.h>
#include <net/ipv6.h>
#include <net/inet_common.h>
#include <net/tcp.h>
#include <net/udp.h>

#include "ip6_offload.h"

/* All GRO functions are always builtin, except UDP over IPv6, which lives in
 * the ipv6 module, as it depends on the UDPv6 lookup function, so we need
 * special care when IPv6 is built as a module.
 */
#if IS_BUILTIN(CONFIG_IPV6)
#define INDIRECT_CALL_L4(f, f2, f1, ...) INDIRECT_CALL_2(f, f2, f1, __VA_ARGS__)
#else
#define INDIRECT_CALL_L4(f, f2, f1, ...) INDIRECT_CALL_1(f, f2, __VA_ARGS__)
#endif

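/* Dispatch the L4 gro_receive callback, preferring direct calls to the TCP
 * and UDP handlers; if the GRO recursion limit has been hit, force a flush
 * and return NULL instead of recursing further.
 */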
#define indirect_call_gro_receive_l4(f2, f1, cb, head, skb)	\
({								\
	unlikely(gro_recursion_inc_test(skb)) ?			\
		NAPI_GRO_CB(skb)->flush |= 1, NULL :		\
		INDIRECT_CALL_L4(cb, f2, f1, head, skb);	\
})

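/* Pull IPv6 extension headers off the front of @skb, starting from @proto,
 * until a header whose offload does not advertise INET6_PROTO_GSO_EXTHDR
 * (or has no offload registered at all) is reached.  Returns the protocol
 * number of that final header.
 */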
static int ipv6_gso_pull_exthdrs(struct sk_buff *skb, int proto)
{
	const struct net_offload *ops = NULL;

	for (;;) {
		struct ipv6_opt_hdr *opth;
		int len;

		if (proto != NEXTHDR_HOP) {
			ops = rcu_dereference(inet6_offloads[proto]);

			if (unlikely(!ops))
				break;

			if (!(ops->flags & INET6_PROTO_GSO_EXTHDR))
				break;
		}

		if (unlikely(!pskb_may_pull(skb, 8)))
			break;

		opth = (void *)skb->data;
		len = ipv6_optlen(opth);

		if (unlikely(!pskb_may_pull(skb, len)))
			break;

		opth = (void *)skb->data;
		proto = opth->nexthdr;
		__skb_pull(skb, len);
	}

	return proto;
}

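/* Segment an IPv6 GSO super-packet: pull the IPv6 and extension headers,
 * delegate to the upper protocol's gso_segment callback, then fix up
 * payload_len (and the fragment header, for UDP fragmentation) in every
 * resulting segment.
 */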
static struct sk_buff *ipv6_gso_segment(struct sk_buff *skb,
					netdev_features_t features)
{
	struct sk_buff *segs = ERR_PTR(-EINVAL);
	struct ipv6hdr *ipv6h;
	const struct net_offload *ops;
	int proto;
	struct frag_hdr *fptr;
	unsigned int payload_len;
	u8 *prevhdr;
	int offset = 0;
	bool encap, udpfrag;
	int nhoff;
	bool gso_partial;

	skb_reset_network_header(skb);
	nhoff = skb_network_header(skb) - skb_mac_header(skb);
	if (unlikely(!pskb_may_pull(skb, sizeof(*ipv6h))))
		goto out;

	encap = SKB_GSO_CB(skb)->encap_level > 0;
	if (encap)
		features &= skb->dev->hw_enc_features;
	SKB_GSO_CB(skb)->encap_level += sizeof(*ipv6h);

	ipv6h = ipv6_hdr(skb);
	__skb_pull(skb, sizeof(*ipv6h));
	segs = ERR_PTR(-EPROTONOSUPPORT);

	proto = ipv6_gso_pull_exthdrs(skb, ipv6h->nexthdr);

	if (skb->encapsulation &&
	    skb_shinfo(skb)->gso_type & (SKB_GSO_IPXIP4 | SKB_GSO_IPXIP6))
		udpfrag = proto == IPPROTO_UDP && encap &&
			  (skb_shinfo(skb)->gso_type & SKB_GSO_UDP);
	else
		udpfrag = proto == IPPROTO_UDP && !skb->encapsulation &&
			  (skb_shinfo(skb)->gso_type & SKB_GSO_UDP);

	ops = rcu_dereference(inet6_offloads[proto]);
	if (likely(ops && ops->callbacks.gso_segment)) {
		skb_reset_transport_header(skb);
		segs = ops->callbacks.gso_segment(skb, features);
		if (!segs)
			skb->network_header = skb_mac_header(skb) + nhoff - skb->head;
	}

	if (IS_ERR_OR_NULL(segs))
		goto out;

	gso_partial = !!(skb_shinfo(segs)->gso_type & SKB_GSO_PARTIAL);

	for (skb = segs; skb; skb = skb->next) {
		ipv6h = (struct ipv6hdr *)(skb_mac_header(skb) + nhoff);
		if (gso_partial && skb_is_gso(skb))
			payload_len = skb_shinfo(skb)->gso_size +
				      SKB_GSO_CB(skb)->data_offset +
				      skb->head - (unsigned char *)(ipv6h + 1);
		else
			payload_len = skb->len - nhoff - sizeof(*ipv6h);
		ipv6h->payload_len = htons(payload_len);
		skb->network_header = (u8 *)ipv6h - skb->head;
		skb_reset_mac_len(skb);

		if (udpfrag) {
			int err = ip6_find_1stfragopt(skb, &prevhdr);

			if (err < 0) {
				kfree_skb_list(segs);
				return ERR_PTR(err);
			}
			fptr = (struct frag_hdr *)((u8 *)ipv6h + err);
			fptr->frag_off = htons(offset);
			if (skb->next)
				fptr->frag_off |= htons(IP6_MF);
			offset += (ntohs(ipv6h->payload_len) -
				   sizeof(struct frag_hdr));
		}
		if (encap)
			skb_reset_inner_headers(skb);
	}

out:
	return segs;
}

/* Return the total length of all the extension hdrs, following the same
 * logic in ipv6_gso_pull_exthdrs() when parsing ext-hdrs.
 */
static int ipv6_exthdrs_len(struct ipv6hdr *iph,
			    const struct net_offload **opps)
{
	struct ipv6_opt_hdr *opth = (void *)iph;
	int len = 0, proto, optlen = sizeof(*iph);

	proto = iph->nexthdr;
	for (;;) {
		if (proto != NEXTHDR_HOP) {
			*opps = rcu_dereference(inet6_offloads[proto]);
			if (unlikely(!(*opps)))
				break;
			if (!((*opps)->flags & INET6_PROTO_GSO_EXTHDR))
				break;
		}
		opth = (void *)opth + optlen;
		optlen = ipv6_optlen(opth);
		len += optlen;
		proto = opth->nexthdr;
	}
	return len;
}

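/* GRO receive entry point for ETH_P_IPV6: validate and pull the IPv6 header
 * (and any extension headers for which no gro_receive handler is registered),
 * compare the flow against packets already held on @head, then hand the skb
 * to the L4 gro_receive callback.
 */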
INDIRECT_CALLABLE_SCOPE struct sk_buff *ipv6_gro_receive(struct list_head *head,
							  struct sk_buff *skb)
{
	const struct net_offload *ops;
	struct sk_buff *pp = NULL;
	struct sk_buff *p;
	struct ipv6hdr *iph;
	unsigned int nlen;
	unsigned int hlen;
	unsigned int off;
	u16 flush = 1;
	int proto;

	off = skb_gro_offset(skb);
	hlen = off + sizeof(*iph);
	iph = skb_gro_header_fast(skb, off);
	if (skb_gro_header_hard(skb, hlen)) {
		iph = skb_gro_header_slow(skb, hlen, off);
		if (unlikely(!iph))
			goto out;
	}

	skb_set_network_header(skb, off);
	skb_gro_pull(skb, sizeof(*iph));
	skb_set_transport_header(skb, skb_gro_offset(skb));

	flush += ntohs(iph->payload_len) != skb_gro_len(skb);

	rcu_read_lock();
	proto = iph->nexthdr;
	ops = rcu_dereference(inet6_offloads[proto]);
	if (!ops || !ops->callbacks.gro_receive) {
		__pskb_pull(skb, skb_gro_offset(skb));
		skb_gro_frag0_invalidate(skb);
		proto = ipv6_gso_pull_exthdrs(skb, proto);
		skb_gro_pull(skb, -skb_transport_offset(skb));
		skb_reset_transport_header(skb);
		__skb_push(skb, skb_gro_offset(skb));

		ops = rcu_dereference(inet6_offloads[proto]);
		if (!ops || !ops->callbacks.gro_receive)
			goto out_unlock;

		iph = ipv6_hdr(skb);
	}

	NAPI_GRO_CB(skb)->proto = proto;

	flush--;
	nlen = skb_network_header_len(skb);

	list_for_each_entry(p, head, list) {
		const struct ipv6hdr *iph2;
		__be32 first_word; /* <Version:4><Traffic_Class:8><Flow_Label:20> */

		if (!NAPI_GRO_CB(p)->same_flow)
			continue;

		iph2 = (struct ipv6hdr *)(p->data + off);
		first_word = *(__be32 *)iph ^ *(__be32 *)iph2;

		/* All fields must match except length and Traffic Class.
		 * XXX skbs on the gro_list have all been parsed and pulled
		 * already so we don't need to compare nlen
		 * (nlen != (sizeof(*iph2) + ipv6_exthdrs_len(iph2, &ops)))
		 * memcmp() alone below is sufficient, right?
		 */
		if ((first_word & htonl(0xF00FFFFF)) ||
		    !ipv6_addr_equal(&iph->saddr, &iph2->saddr) ||
		    !ipv6_addr_equal(&iph->daddr, &iph2->daddr) ||
		    *(u16 *)&iph->nexthdr != *(u16 *)&iph2->nexthdr) {
not_same_flow:
			NAPI_GRO_CB(p)->same_flow = 0;
			continue;
		}
		if (unlikely(nlen > sizeof(struct ipv6hdr))) {
			if (memcmp(iph + 1, iph2 + 1,
				   nlen - sizeof(struct ipv6hdr)))
				goto not_same_flow;
		}
		/* flush if Traffic Class fields are different */
		NAPI_GRO_CB(p)->flush |= !!(first_word & htonl(0x0FF00000));
		NAPI_GRO_CB(p)->flush |= flush;

		/* If the previous IP ID value was based on an atomic
		 * datagram we can overwrite the value and ignore it.
		 */
		if (NAPI_GRO_CB(skb)->is_atomic)
			NAPI_GRO_CB(p)->flush_id = 0;
	}

	NAPI_GRO_CB(skb)->is_atomic = true;
	NAPI_GRO_CB(skb)->flush |= flush;

	skb_gro_postpull_rcsum(skb, iph, nlen);

	pp = indirect_call_gro_receive_l4(tcp6_gro_receive, udp6_gro_receive,
					  ops->callbacks.gro_receive, head, skb);

out_unlock:
	rcu_read_unlock();

out:
	skb_gro_flush_final(skb, pp, flush);

	return pp;
}

static struct sk_buff *sit_ip6ip6_gro_receive(struct list_head *head,
					      struct sk_buff *skb)
{
	/* Common GRO receive for SIT and IP6IP6 */

	if (NAPI_GRO_CB(skb)->encap_mark) {
		NAPI_GRO_CB(skb)->flush = 1;
		return NULL;
	}

	NAPI_GRO_CB(skb)->encap_mark = 1;

	return ipv6_gro_receive(head, skb);
}

static struct sk_buff *ip4ip6_gro_receive(struct list_head *head,
					  struct sk_buff *skb)
{
	/* GRO receive for IPv4 encapsulated in IPv6 (ip4ip6) */

	if (NAPI_GRO_CB(skb)->encap_mark) {
		NAPI_GRO_CB(skb)->flush = 1;
		return NULL;
	}

	NAPI_GRO_CB(skb)->encap_mark = 1;

	return inet_gro_receive(head, skb);
}

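/* Finalise a merged packet: restore payload_len for the coalesced skb, skip
 * past the IPv6 and extension headers and invoke the matching gro_complete
 * callback (tcp6/udp6 are called directly when possible).
 */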
INDIRECT_CALLABLE_SCOPE int ipv6_gro_complete(struct sk_buff *skb, int nhoff)
{
	const struct net_offload *ops;
	struct ipv6hdr *iph = (struct ipv6hdr *)(skb->data + nhoff);
	int err = -ENOSYS;

	if (skb->encapsulation) {
		skb_set_inner_protocol(skb, cpu_to_be16(ETH_P_IPV6));
		skb_set_inner_network_header(skb, nhoff);
	}

	iph->payload_len = htons(skb->len - nhoff - sizeof(*iph));

	rcu_read_lock();

	nhoff += sizeof(*iph) + ipv6_exthdrs_len(iph, &ops);
	if (WARN_ON(!ops || !ops->callbacks.gro_complete))
		goto out_unlock;

	err = INDIRECT_CALL_L4(ops->callbacks.gro_complete, tcp6_gro_complete,
			       udp6_gro_complete, skb, nhoff);

out_unlock:
	rcu_read_unlock();

	return err;
}

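/* Tunnel gro_complete wrappers: mark the skb as encapsulated, record the
 * outer tunnel type in gso_type and complete the inner headers.
 */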
static int sit_gro_complete(struct sk_buff *skb, int nhoff)
{
	skb->encapsulation = 1;
	skb_shinfo(skb)->gso_type |= SKB_GSO_IPXIP4;
	return ipv6_gro_complete(skb, nhoff);
}

static int ip6ip6_gro_complete(struct sk_buff *skb, int nhoff)
{
	skb->encapsulation = 1;
	skb_shinfo(skb)->gso_type |= SKB_GSO_IPXIP6;
	return ipv6_gro_complete(skb, nhoff);
}

static int ip4ip6_gro_complete(struct sk_buff *skb, int nhoff)
{
	skb->encapsulation = 1;
	skb_shinfo(skb)->gso_type |= SKB_GSO_IPXIP6;
	return inet_gro_complete(skb, nhoff);
}

static struct packet_offload ipv6_packet_offload __read_mostly = {
	.type = cpu_to_be16(ETH_P_IPV6),
	.callbacks = {
		.gso_segment = ipv6_gso_segment,
		.gro_receive = ipv6_gro_receive,
		.gro_complete = ipv6_gro_complete,
	},
};

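/* Tunnel gso_segment wrappers: reject skbs whose gso_type does not carry the
 * expected IPXIP bit, then segment the inner packet.
 */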
static struct sk_buff *sit_gso_segment(struct sk_buff *skb,
				       netdev_features_t features)
{
	if (!(skb_shinfo(skb)->gso_type & SKB_GSO_IPXIP4))
		return ERR_PTR(-EINVAL);

	return ipv6_gso_segment(skb, features);
}

static struct sk_buff *ip4ip6_gso_segment(struct sk_buff *skb,
					  netdev_features_t features)
{
	if (!(skb_shinfo(skb)->gso_type & SKB_GSO_IPXIP6))
		return ERR_PTR(-EINVAL);

	return inet_gso_segment(skb, features);
}

static struct sk_buff *ip6ip6_gso_segment(struct sk_buff *skb,
					  netdev_features_t features)
{
	if (!(skb_shinfo(skb)->gso_type & SKB_GSO_IPXIP6))
		return ERR_PTR(-EINVAL);

	return ipv6_gso_segment(skb, features);
}

static const struct net_offload sit_offload = {
	.callbacks = {
		.gso_segment = sit_gso_segment,
		.gro_receive = sit_ip6ip6_gro_receive,
		.gro_complete = sit_gro_complete,
	},
};

static const struct net_offload ip4ip6_offload = {
	.callbacks = {
		.gso_segment = ip4ip6_gso_segment,
		.gro_receive = ip4ip6_gro_receive,
		.gro_complete = ip4ip6_gro_complete,
	},
};

static const struct net_offload ip6ip6_offload = {
	.callbacks = {
		.gso_segment = ip6ip6_gso_segment,
		.gro_receive = sit_ip6ip6_gro_receive,
		.gro_complete = ip6ip6_gro_complete,
	},
};

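/* Register the IPv6 packet offload and the SIT/ip6ip6/ip4ip6 tunnel protocol
 * offloads at boot.
 */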
static int __init ipv6_offload_init(void)
{
	if (tcpv6_offload_init() < 0)
		pr_crit("%s: Cannot add TCP protocol offload\n", __func__);
	if (ipv6_exthdrs_offload_init() < 0)
		pr_crit("%s: Cannot add EXTHDRS protocol offload\n", __func__);

	dev_add_offload(&ipv6_packet_offload);

	inet_add_offload(&sit_offload, IPPROTO_IPV6);
	inet6_add_offload(&ip6ip6_offload, IPPROTO_IPV6);
	inet6_add_offload(&ip4ip6_offload, IPPROTO_IPIP);

	return 0;
}

fs_initcall(ipv6_offload_init);