Orange Pi 5 kernel

Deprecated Linux kernel 5.10.110 for OrangePi 5/5B/5+ boards

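File: net/xfrm/xfrm_device.c
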
// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * xfrm_device.c - IPsec device offloading code.
 *
 * Copyright (c) 2015 secunet Security Networks AG
 *
 * Author:
 * Steffen Klassert <steffen.klassert@secunet.com>
 */

#include <linux/errno.h>
#include <linux/module.h>
#include <linux/netdevice.h>
#include <linux/skbuff.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <net/dst.h>
#include <net/xfrm.h>
#include <linux/notifier.h>

#ifdef CONFIG_XFRM_OFFLOAD
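/* Per-mode helpers for xfrm_outer_mode_prep() below: each one adjusts
 * skb->data and the header offsets so that the offload xmit callback
 * (x->type_offload->xmit) sees the packet at the layout it expects for
 * this encapsulation mode.
 */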
static void __xfrm_transport_prep(struct xfrm_state *x, struct sk_buff *skb,
				  unsigned int hsize)
{
	struct xfrm_offload *xo = xfrm_offload(skb);

	skb_reset_mac_len(skb);
	if (xo->flags & XFRM_GSO_SEGMENT)
		skb->transport_header -= x->props.header_len;

	pskb_pull(skb, skb_transport_offset(skb) + x->props.header_len);
}

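/* Tunnel mode: the ESP header directly follows the outer IP header, so
 * for GSO segments point the transport header right behind the
 * 'hsize'-byte outer header before pulling to the payload.
 */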
static void __xfrm_mode_tunnel_prep(struct xfrm_state *x, struct sk_buff *skb,
				    unsigned int hsize)
{
	struct xfrm_offload *xo = xfrm_offload(skb);

	if (xo->flags & XFRM_GSO_SEGMENT)
		skb->transport_header = skb->network_header + hsize;

	skb_reset_mac_len(skb);
	pskb_pull(skb, skb->mac_len + x->props.header_len);
}

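/* BEET mode: as above, but account for the BEET pseudo header. With an
 * IPv4 selector up to IPV4_BEET_PHMAXLEN pseudo-header bytes may be
 * present, plus the v4/v6 header-size difference when the outer family
 * is IPv6.
 */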
static void __xfrm_mode_beet_prep(struct xfrm_state *x, struct sk_buff *skb,
				  unsigned int hsize)
{
	struct xfrm_offload *xo = xfrm_offload(skb);
	int phlen = 0;

	if (xo->flags & XFRM_GSO_SEGMENT)
		skb->transport_header = skb->network_header + hsize;

	skb_reset_mac_len(skb);
	if (x->sel.family != AF_INET6) {
		phlen = IPV4_BEET_PHMAXLEN;
		if (x->outer_mode.family == AF_INET6)
			phlen += sizeof(struct ipv6hdr) - sizeof(struct iphdr);
	}

	pskb_pull(skb, skb->mac_len + hsize + (x->props.header_len - phlen));
}

/* Adjust pointers into the packet when IPsec is done at layer2 */
static void xfrm_outer_mode_prep(struct xfrm_state *x, struct sk_buff *skb)
{
	switch (x->outer_mode.encap) {
	case XFRM_MODE_TUNNEL:
		if (x->outer_mode.family == AF_INET)
			return __xfrm_mode_tunnel_prep(x, skb,
						       sizeof(struct iphdr));
		if (x->outer_mode.family == AF_INET6)
			return __xfrm_mode_tunnel_prep(x, skb,
						       sizeof(struct ipv6hdr));
		break;
	case XFRM_MODE_TRANSPORT:
		if (x->outer_mode.family == AF_INET)
			return __xfrm_transport_prep(x, skb,
						     sizeof(struct iphdr));
		if (x->outer_mode.family == AF_INET6)
			return __xfrm_transport_prep(x, skb,
						     sizeof(struct ipv6hdr));
		break;
	case XFRM_MODE_BEET:
		if (x->outer_mode.family == AF_INET)
			return __xfrm_mode_beet_prep(x, skb,
						     sizeof(struct iphdr));
		if (x->outer_mode.family == AF_INET6)
			return __xfrm_mode_beet_prep(x, skb,
						     sizeof(struct ipv6hdr));
		break;
	case XFRM_MODE_ROUTEOPTIMIZATION:
	case XFRM_MODE_IN_TRIGGER:
		break;
	}
}

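/* Called from the core transmit path (validate_xmit_skb) for packets
 * that carry a secpath. Runs the offload xmit callback of the state
 * (esp_xmit for ESP), segmenting first if the packet was rerouted to a
 * device other than the one the state is offloaded to.
 */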
struct sk_buff *validate_xmit_xfrm(struct sk_buff *skb, netdev_features_t features, bool *again)
{
	int err;
	unsigned long flags;
	struct xfrm_state *x;
	struct softnet_data *sd;
	struct sk_buff *skb2, *nskb, *pskb = NULL;
	netdev_features_t esp_features = features;
	struct xfrm_offload *xo = xfrm_offload(skb);
	struct net_device *dev = skb->dev;
	struct sec_path *sp;

	if (!xo || (xo->flags & XFRM_XMIT))
		return skb;

	if (!(features & NETIF_F_HW_ESP))
		esp_features = features & ~(NETIF_F_SG | NETIF_F_CSUM_MASK);

	sp = skb_sec_path(skb);
	x = sp->xvec[sp->len - 1];
	if (xo->flags & XFRM_GRO || x->xso.flags & XFRM_OFFLOAD_INBOUND)
		return skb;

	/* This skb was already validated on the upper/virtual dev */
	if ((x->xso.dev != dev) && (x->xso.real_dev == dev))
		return skb;

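	/* Keep ordering: if earlier packets are still sitting on this
	 * CPU's xfrm backlog, ask the caller to retry later instead of
	 * overtaking them.
	 */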
	local_irq_save(flags);
	sd = this_cpu_ptr(&softnet_data);
	err = !skb_queue_empty(&sd->xfrm_backlog);
	local_irq_restore(flags);

	if (err) {
		*again = true;
		return skb;
	}

	if (skb_is_gso(skb) && unlikely(x->xso.dev != dev)) {
		struct sk_buff *segs;

		/* Packet got rerouted, fixup features and segment it. */
		esp_features = esp_features & ~(NETIF_F_HW_ESP | NETIF_F_GSO_ESP);

		segs = skb_gso_segment(skb, esp_features);
		if (IS_ERR(segs)) {
			kfree_skb(skb);
			atomic_long_inc(&dev->tx_dropped);
			return NULL;
		} else {
			consume_skb(skb);
			skb = segs;
		}
	}

	if (!skb->next) {
		esp_features |= skb->dev->gso_partial_features;
		xfrm_outer_mode_prep(x, skb);

		xo->flags |= XFRM_DEV_RESUME;

		err = x->type_offload->xmit(x, skb, esp_features);
		if (err) {
			if (err == -EINPROGRESS)
				return NULL;

			XFRM_INC_STATS(xs_net(x), LINUX_MIB_XFRMOUTSTATEPROTOERROR);
			kfree_skb(skb);
			return NULL;
		}

		skb_push(skb, skb->data - skb_mac_header(skb));

		return skb;
	}

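	/* GSO segment list: hand each segment to the offload xmit
	 * callback. Accepted segments stay linked, segments taken over
	 * asynchronously (-EINPROGRESS) are unlinked, and any other
	 * error frees the remainder of the list.
	 */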
	skb_list_walk_safe(skb, skb2, nskb) {
		esp_features |= skb->dev->gso_partial_features;
		skb_mark_not_on_list(skb2);

		xo = xfrm_offload(skb2);
		xo->flags |= XFRM_DEV_RESUME;

		xfrm_outer_mode_prep(x, skb2);

		err = x->type_offload->xmit(x, skb2, esp_features);
		if (!err) {
			skb2->next = nskb;
		} else if (err != -EINPROGRESS) {
			XFRM_INC_STATS(xs_net(x), LINUX_MIB_XFRMOUTSTATEPROTOERROR);
			skb2->next = nskb;
			kfree_skb_list(skb2);
			return NULL;
		} else {
			if (skb == skb2)
				skb = nskb;
			else
				pskb->next = nskb;

			continue;
		}

		skb_push(skb2, skb2->data - skb_mac_header(skb2));
		pskb = skb2;
	}

	return skb;
}
EXPORT_SYMBOL_GPL(validate_xmit_xfrm);

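/* Bind an xfrm state to an offloading netdevice: resolve the device
 * from xuo->ifindex (or by a route lookup on the state's addresses),
 * check that the driver implements the required xdo_dev_state_* ops
 * (including ESN advance when the state uses ESN), then add the state
 * through the driver. Falls back to software on -EOPNOTSUPP.
 */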
int xfrm_dev_state_add(struct net *net, struct xfrm_state *x,
		       struct xfrm_user_offload *xuo)
{
	int err;
	struct dst_entry *dst;
	struct net_device *dev;
	struct xfrm_state_offload *xso = &x->xso;
	xfrm_address_t *saddr;
	xfrm_address_t *daddr;

	if (!x->type_offload)
		return -EINVAL;

	/* We don't yet support UDP encapsulation and TFC padding. */
	if (x->encap || x->tfcpad)
		return -EINVAL;

	if (xuo->flags & ~(XFRM_OFFLOAD_IPV6 | XFRM_OFFLOAD_INBOUND))
		return -EINVAL;

	dev = dev_get_by_index(net, xuo->ifindex);
	if (!dev) {
		if (!(xuo->flags & XFRM_OFFLOAD_INBOUND)) {
			saddr = &x->props.saddr;
			daddr = &x->id.daddr;
		} else {
			saddr = &x->id.daddr;
			daddr = &x->props.saddr;
		}

		dst = __xfrm_dst_lookup(net, 0, 0, saddr, daddr,
					x->props.family,
					xfrm_smark_get(0, x));
		if (IS_ERR(dst))
			return 0;

		dev = dst->dev;

		dev_hold(dev);
		dst_release(dst);
	}

	if (!dev->xfrmdev_ops || !dev->xfrmdev_ops->xdo_dev_state_add) {
		xso->dev = NULL;
		dev_put(dev);
		return 0;
	}

	if (x->props.flags & XFRM_STATE_ESN &&
	    !dev->xfrmdev_ops->xdo_dev_state_advance_esn) {
		xso->dev = NULL;
		dev_put(dev);
		return -EINVAL;
	}

	xso->dev = dev;
	xso->real_dev = dev;
	xso->num_exthdrs = 1;
	/* Don't forward bit that is not implemented */
	xso->flags = xuo->flags & ~XFRM_OFFLOAD_IPV6;

	err = dev->xfrmdev_ops->xdo_dev_state_add(x);
	if (err) {
		xso->num_exthdrs = 0;
		xso->flags = 0;
		xso->dev = NULL;
		xso->real_dev = NULL;
		dev_put(dev);

		if (err != -EOPNOTSUPP)
			return err;
	}

	return 0;
}
EXPORT_SYMBOL_GPL(xfrm_dev_state_add);

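/* Check whether a packet may use the hardware offload path for this
 * state: no nested xfrm below it, and the packet fits the state's MTU
 * (or is GSO and validates against it). The driver gets the final say
 * through xdo_dev_offload_ok.
 */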
bool xfrm_dev_offload_ok(struct sk_buff *skb, struct xfrm_state *x)
{
	int mtu;
	struct dst_entry *dst = skb_dst(skb);
	struct xfrm_dst *xdst = (struct xfrm_dst *)dst;
	struct net_device *dev = x->xso.dev;

	if (!x->type_offload || x->encap)
		return false;

	if ((!dev || (dev == xfrm_dst_path(dst)->dev)) &&
	    (!xdst->child->xfrm)) {
		mtu = xfrm_state_mtu(x, xdst->child_mtu_cached);
		if (skb->len <= mtu)
			goto ok;

		if (skb_is_gso(skb) && skb_gso_validate_network_len(skb, mtu))
			goto ok;
	}

	return false;

ok:
	if (dev && dev->xfrmdev_ops && dev->xfrmdev_ops->xdo_dev_offload_ok)
		return x->xso.dev->xfrmdev_ops->xdo_dev_offload_ok(skb, x);

	return true;
}
EXPORT_SYMBOL_GPL(xfrm_dev_offload_ok);

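/* Resume transmission of a packet whose offload operation completed
 * asynchronously: try to send it on the device's TX queue, and if the
 * queue is stopped or the transmit does not complete, park the skb on
 * the per-CPU xfrm backlog and schedule the TX softirq.
 */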
void xfrm_dev_resume(struct sk_buff *skb)
{
	struct net_device *dev = skb->dev;
	int ret = NETDEV_TX_BUSY;
	struct netdev_queue *txq;
	struct softnet_data *sd;
	unsigned long flags;

	rcu_read_lock();
	txq = netdev_core_pick_tx(dev, skb, NULL);

	HARD_TX_LOCK(dev, txq, smp_processor_id());
	if (!netif_xmit_frozen_or_stopped(txq))
		skb = dev_hard_start_xmit(skb, dev, txq, &ret);
	HARD_TX_UNLOCK(dev, txq);

	if (!dev_xmit_complete(ret)) {
		local_irq_save(flags);
		sd = this_cpu_ptr(&softnet_data);
		skb_queue_tail(&sd->xfrm_backlog, skb);
		raise_softirq_irqoff(NET_TX_SOFTIRQ);
		local_irq_restore(flags);
	}
	rcu_read_unlock();
}
EXPORT_SYMBOL_GPL(xfrm_dev_resume);

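/* Drain the per-CPU backlog filled by xfrm_dev_resume(); called from
 * net_tx_action() in the NET_TX softirq.
 */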
void xfrm_dev_backlog(struct softnet_data *sd)
{
	struct sk_buff_head *xfrm_backlog = &sd->xfrm_backlog;
	struct sk_buff_head list;
	struct sk_buff *skb;

	if (skb_queue_empty(xfrm_backlog))
		return;

	__skb_queue_head_init(&list);

	spin_lock(&xfrm_backlog->lock);
	skb_queue_splice_init(xfrm_backlog, &list);
	spin_unlock(&xfrm_backlog->lock);

	while (!skb_queue_empty(&list)) {
		skb = __skb_dequeue(&list);
		xfrm_dev_resume(skb);
	}
}
#endif

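/* Validate a device's advertised ESP offload features against the ops
 * it actually provides; inconsistent combinations veto the netdev
 * notifier event.
 */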
static int xfrm_api_check(struct net_device *dev)
{
#ifdef CONFIG_XFRM_OFFLOAD
	if ((dev->features & NETIF_F_HW_ESP_TX_CSUM) &&
	    !(dev->features & NETIF_F_HW_ESP))
		return NOTIFY_BAD;

	if ((dev->features & NETIF_F_HW_ESP) &&
	    (!(dev->xfrmdev_ops &&
	       dev->xfrmdev_ops->xdo_dev_state_add &&
	       dev->xfrmdev_ops->xdo_dev_state_delete)))
		return NOTIFY_BAD;
#else
	if (dev->features & (NETIF_F_HW_ESP | NETIF_F_HW_ESP_TX_CSUM))
		return NOTIFY_BAD;
#endif

	return NOTIFY_DONE;
}

static int xfrm_dev_register(struct net_device *dev)
{
	return xfrm_api_check(dev);
}

static int xfrm_dev_feat_change(struct net_device *dev)
{
	return xfrm_api_check(dev);
}

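/* On NETDEV_DOWN/NETDEV_UNREGISTER, flush all states offloaded to the
 * device so none of them keeps a stale device reference.
 */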
static int xfrm_dev_down(struct net_device *dev)
{
	if (dev->features & NETIF_F_HW_ESP)
		xfrm_dev_state_flush(dev_net(dev), dev, true);

	return NOTIFY_DONE;
}

static int xfrm_dev_event(struct notifier_block *this, unsigned long event, void *ptr)
{
	struct net_device *dev = netdev_notifier_info_to_dev(ptr);

	switch (event) {
	case NETDEV_REGISTER:
		return xfrm_dev_register(dev);

	case NETDEV_FEAT_CHANGE:
		return xfrm_dev_feat_change(dev);

	case NETDEV_DOWN:
	case NETDEV_UNREGISTER:
		return xfrm_dev_down(dev);
	}
	return NOTIFY_DONE;
}

static struct notifier_block xfrm_dev_notifier = {
	.notifier_call	= xfrm_dev_event,
};

void __init xfrm_dev_init(void)
{
	register_netdevice_notifier(&xfrm_dev_notifier);
}