Orange Pi 5 kernel

Deprecated Linux kernel 5.10.110 for Orange Pi 5/5B/5+ boards.

Below is drivers/net/wireguard/device.c from this tree, the WireGuard
virtual network device implementation.

// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2015-2019 Jason A. Donenfeld <Jason@zx2c4.com>. All Rights Reserved.
 */

#include "queueing.h"
#include "socket.h"
#include "timers.h"
#include "device.h"
#include "ratelimiter.h"
#include "peer.h"
#include "messages.h"

#include <linux/module.h>
#include <linux/rtnetlink.h>
#include <linux/inet.h>
#include <linux/netdevice.h>
#include <linux/inetdevice.h>
#include <linux/if_arp.h>
#include <linux/icmp.h>
#include <linux/suspend.h>
#include <net/icmp.h>
#include <net/rtnetlink.h>
#include <net/ip_tunnels.h>
#include <net/addrconf.h>

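/* All wg devices, walked by the PM notifier and the pernet pre-exit hook;
 * insertion and traversal happen under the rtnl lock.
 */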
static LIST_HEAD(device_list);

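/* Bring the interface up: disable ICMP redirects and IPv6 address
 * autogeneration on the tunnel, bind the UDP socket to the configured
 * listen port, then flush packets staged while the device was down and
 * send keepalives where configured.
 */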
static int wg_open(struct net_device *dev)
{
	struct in_device *dev_v4 = __in_dev_get_rtnl(dev);
	struct inet6_dev *dev_v6 = __in6_dev_get(dev);
	struct wg_device *wg = netdev_priv(dev);
	struct wg_peer *peer;
	int ret;

	if (dev_v4) {
		/* At some point we might put this check near the ip_rt_send_
		 * redirect call of ip_forward in net/ipv4/ip_forward.c, similar
		 * to the current secpath check.
		 */
		IN_DEV_CONF_SET(dev_v4, SEND_REDIRECTS, false);
		IPV4_DEVCONF_ALL(dev_net(dev), SEND_REDIRECTS) = false;
	}
	if (dev_v6)
		dev_v6->cnf.addr_gen_mode = IN6_ADDR_GEN_MODE_NONE;

	mutex_lock(&wg->device_update_lock);
	ret = wg_socket_init(wg, wg->incoming_port);
	if (ret < 0)
		goto out;
	list_for_each_entry(peer, &wg->peer_list, peer_list) {
		wg_packet_send_staged_packets(peer);
		if (peer->persistent_keepalive_interval)
			wg_packet_send_keepalive(peer);
	}
out:
	mutex_unlock(&wg->device_update_lock);
	return ret;
}

#ifdef CONFIG_PM_SLEEP
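/* Before suspend or hibernation, zero all handshake state and session keys
 * on every device, so that no key material lingers in RAM while the machine
 * sleeps. Skipped when sleeping is routine rather than rare (see below).
 */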
static int wg_pm_notification(struct notifier_block *nb, unsigned long action,
			      void *data)
{
	struct wg_device *wg;
	struct wg_peer *peer;

	/* If the machine is constantly suspending and resuming, as part of
	 * its normal operation rather than as a somewhat rare event, then we
	 * don't actually want to clear keys.
	 */
	if (IS_ENABLED(CONFIG_PM_AUTOSLEEP) || IS_ENABLED(CONFIG_ANDROID))
		return 0;

	if (action != PM_HIBERNATION_PREPARE && action != PM_SUSPEND_PREPARE)
		return 0;

	rtnl_lock();
	list_for_each_entry(wg, &device_list, device_list) {
		mutex_lock(&wg->device_update_lock);
		list_for_each_entry(peer, &wg->peer_list, peer_list) {
			del_timer(&peer->timer_zero_key_material);
			wg_noise_handshake_clear(&peer->handshake);
			wg_noise_keypairs_clear(&peer->keypairs);
		}
		mutex_unlock(&wg->device_update_lock);
	}
	rtnl_unlock();
	rcu_barrier();
	return 0;
}

static struct notifier_block pm_notifier = { .notifier_call = wg_pm_notification };
#endif

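/* Bring the interface down: purge staged packets, stop all timers, clear
 * handshake and session state for every peer, drain queued incoming
 * handshakes, and release the UDP socket.
 */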
static int wg_stop(struct net_device *dev)
{
	struct wg_device *wg = netdev_priv(dev);
	struct wg_peer *peer;
	struct sk_buff *skb;

	mutex_lock(&wg->device_update_lock);
	list_for_each_entry(peer, &wg->peer_list, peer_list) {
		wg_packet_purge_staged_packets(peer);
		wg_timers_stop(peer);
		wg_noise_handshake_clear(&peer->handshake);
		wg_noise_keypairs_clear(&peer->keypairs);
		wg_noise_reset_last_sent_handshake(&peer->last_sent_handshake);
	}
	mutex_unlock(&wg->device_update_lock);
	while ((skb = ptr_ring_consume(&wg->handshake_queue.ring)) != NULL)
		kfree_skb(skb);
	atomic_set(&wg->handshake_queue_len, 0);
	wg_socket_reinit(wg, NULL, NULL);
	return 0;
}

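/* Transmit path: map the packet's destination address to a peer via the
 * allowedips lookup, segment GSO packets, stage the segments on that peer's
 * queue, and kick the asynchronous encrypt/send machinery. Failures are
 * reported to the sender with an ICMP unreachable, like a missing route.
 */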
static netdev_tx_t wg_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct wg_device *wg = netdev_priv(dev);
	struct sk_buff_head packets;
	struct wg_peer *peer;
	struct sk_buff *next;
	sa_family_t family;
	u32 mtu;
	int ret;

	if (unlikely(!wg_check_packet_protocol(skb))) {
		ret = -EPROTONOSUPPORT;
		net_dbg_ratelimited("%s: Invalid IP packet\n", dev->name);
		goto err;
	}

	peer = wg_allowedips_lookup_dst(&wg->peer_allowedips, skb);
	if (unlikely(!peer)) {
		ret = -ENOKEY;
		if (skb->protocol == htons(ETH_P_IP))
			net_dbg_ratelimited("%s: No peer has allowed IPs matching %pI4\n",
					    dev->name, &ip_hdr(skb)->daddr);
		else if (skb->protocol == htons(ETH_P_IPV6))
			net_dbg_ratelimited("%s: No peer has allowed IPs matching %pI6\n",
					    dev->name, &ipv6_hdr(skb)->daddr);
		goto err_icmp;
	}

	family = READ_ONCE(peer->endpoint.addr.sa_family);
	if (unlikely(family != AF_INET && family != AF_INET6)) {
		ret = -EDESTADDRREQ;
		net_dbg_ratelimited("%s: No valid endpoint has been configured or discovered for peer %llu\n",
				    dev->name, peer->internal_id);
		goto err_peer;
	}

	mtu = skb_dst(skb) ? dst_mtu(skb_dst(skb)) : dev->mtu;

	__skb_queue_head_init(&packets);
	if (!skb_is_gso(skb)) {
		skb_mark_not_on_list(skb);
	} else {
		struct sk_buff *segs = skb_gso_segment(skb, 0);

		if (unlikely(IS_ERR(segs))) {
			ret = PTR_ERR(segs);
			goto err_peer;
		}
		dev_kfree_skb(skb);
		skb = segs;
	}

	skb_list_walk_safe(skb, skb, next) {
		skb_mark_not_on_list(skb);

		skb = skb_share_check(skb, GFP_ATOMIC);
		if (unlikely(!skb))
			continue;

		/* We only need to keep the original dst around for icmp,
		 * so at this point we're in a position to drop it.
		 */
		skb_dst_drop(skb);

		PACKET_CB(skb)->mtu = mtu;

		__skb_queue_tail(&packets, skb);
	}

	spin_lock_bh(&peer->staged_packet_queue.lock);
	/* If the queue is getting too big, we start removing the oldest packets
	 * until it's small again. We do this before adding the new packet, so
	 * we don't remove GSO segments that are in excess.
	 */
	while (skb_queue_len(&peer->staged_packet_queue) > MAX_STAGED_PACKETS) {
		dev_kfree_skb(__skb_dequeue(&peer->staged_packet_queue));
		++dev->stats.tx_dropped;
	}
	skb_queue_splice_tail(&packets, &peer->staged_packet_queue);
	spin_unlock_bh(&peer->staged_packet_queue.lock);

	wg_packet_send_staged_packets(peer);

	wg_peer_put(peer);
	return NETDEV_TX_OK;

err_peer:
	wg_peer_put(peer);
err_icmp:
	if (skb->protocol == htons(ETH_P_IP))
		icmp_ndo_send(skb, ICMP_DEST_UNREACH, ICMP_HOST_UNREACH, 0);
	else if (skb->protocol == htons(ETH_P_IPV6))
		icmpv6_ndo_send(skb, ICMPV6_DEST_UNREACH, ICMPV6_ADDR_UNREACH, 0);
err:
	++dev->stats.tx_errors;
	kfree_skb(skb);
	return ret;
}

static const struct net_device_ops netdev_ops = {
	.ndo_open		= wg_open,
	.ndo_stop		= wg_stop,
	.ndo_start_xmit		= wg_xmit,
	.ndo_get_stats64	= ip_tunnel_get_stats64
};

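/* Tear down a device: unhook it from the global list, close its socket,
 * remove all peers, and free the queues, workqueues, and hashtables,
 * wiping the static private key before its memory is released.
 */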
static void wg_destruct(struct net_device *dev)
{
	struct wg_device *wg = netdev_priv(dev);

	rtnl_lock();
	list_del(&wg->device_list);
	rtnl_unlock();
	mutex_lock(&wg->device_update_lock);
	rcu_assign_pointer(wg->creating_net, NULL);
	wg->incoming_port = 0;
	wg_socket_reinit(wg, NULL, NULL);
	/* The final references are cleared in the below calls to destroy_workqueue. */
	wg_peer_remove_all(wg);
	destroy_workqueue(wg->handshake_receive_wq);
	destroy_workqueue(wg->handshake_send_wq);
	destroy_workqueue(wg->packet_crypt_wq);
	wg_packet_queue_free(&wg->handshake_queue, true);
	wg_packet_queue_free(&wg->decrypt_queue, false);
	wg_packet_queue_free(&wg->encrypt_queue, false);
	rcu_barrier(); /* Wait for all the peers to be actually freed. */
	wg_ratelimiter_uninit();
	memzero_explicit(&wg->static_identity, sizeof(wg->static_identity));
	free_percpu(dev->tstats);
	kvfree(wg->index_hashtable);
	kvfree(wg->peer_hashtable);
	mutex_unlock(&wg->device_update_lock);

	pr_debug("%s: Interface destroyed\n", dev->name);
	free_netdev(dev);
}

static const struct device_type device_type = { .name = KBUILD_MODNAME };

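/* Initialize net_device fields for a fresh wg interface: a point-to-point,
 * ARP-less layer-3 device whose default MTU leaves room for the WireGuard,
 * UDP, and outer IP headers within a standard Ethernet payload.
 */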
static void wg_setup(struct net_device *dev)
{
	struct wg_device *wg = netdev_priv(dev);
	enum { WG_NETDEV_FEATURES = NETIF_F_HW_CSUM | NETIF_F_RXCSUM |
				    NETIF_F_SG | NETIF_F_GSO |
				    NETIF_F_GSO_SOFTWARE | NETIF_F_HIGHDMA };
	const int overhead = MESSAGE_MINIMUM_LENGTH + sizeof(struct udphdr) +
			     max(sizeof(struct ipv6hdr), sizeof(struct iphdr));

	dev->netdev_ops = &netdev_ops;
	dev->header_ops = &ip_tunnel_header_ops;
	dev->hard_header_len = 0;
	dev->addr_len = 0;
	dev->needed_headroom = DATA_PACKET_HEAD_ROOM;
	dev->needed_tailroom = noise_encrypted_len(MESSAGE_PADDING_MULTIPLE);
	dev->type = ARPHRD_NONE;
	dev->flags = IFF_POINTOPOINT | IFF_NOARP;
	dev->priv_flags |= IFF_NO_QUEUE;
	dev->features |= NETIF_F_LLTX;
	dev->features |= WG_NETDEV_FEATURES;
	dev->hw_features |= WG_NETDEV_FEATURES;
	dev->hw_enc_features |= WG_NETDEV_FEATURES;
	dev->mtu = ETH_DATA_LEN - overhead;
	dev->max_mtu = round_down(INT_MAX, MESSAGE_PADDING_MULTIPLE) - overhead;

	SET_NETDEV_DEVTYPE(dev, &device_type);

	/* We need to keep the dst around in case of icmp replies. */
	netif_keep_dst(dev);

	memset(wg, 0, sizeof(*wg));
	wg->dev = dev;
}

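/* Create a new wg interface: allocate the hashtables, per-cpu stats,
 * workqueues, and packet queues, then register the netdevice. Each error
 * label unwinds exactly the allocations made before the failing step.
 */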
static int wg_newlink(struct net *src_net, struct net_device *dev,
		      struct nlattr *tb[], struct nlattr *data[],
		      struct netlink_ext_ack *extack)
{
	struct wg_device *wg = netdev_priv(dev);
	int ret = -ENOMEM;

	rcu_assign_pointer(wg->creating_net, src_net);
	init_rwsem(&wg->static_identity.lock);
	mutex_init(&wg->socket_update_lock);
	mutex_init(&wg->device_update_lock);
	wg_allowedips_init(&wg->peer_allowedips);
	wg_cookie_checker_init(&wg->cookie_checker, wg);
	INIT_LIST_HEAD(&wg->peer_list);
	wg->device_update_gen = 1;

	wg->peer_hashtable = wg_pubkey_hashtable_alloc();
	if (!wg->peer_hashtable)
		return ret;

	wg->index_hashtable = wg_index_hashtable_alloc();
	if (!wg->index_hashtable)
		goto err_free_peer_hashtable;

	dev->tstats = netdev_alloc_pcpu_stats(struct pcpu_sw_netstats);
	if (!dev->tstats)
		goto err_free_index_hashtable;

	wg->handshake_receive_wq = alloc_workqueue("wg-kex-%s",
			WQ_CPU_INTENSIVE | WQ_FREEZABLE, 0, dev->name);
	if (!wg->handshake_receive_wq)
		goto err_free_tstats;

	wg->handshake_send_wq = alloc_workqueue("wg-kex-%s",
			WQ_UNBOUND | WQ_FREEZABLE, 0, dev->name);
	if (!wg->handshake_send_wq)
		goto err_destroy_handshake_receive;

	wg->packet_crypt_wq = alloc_workqueue("wg-crypt-%s",
			WQ_CPU_INTENSIVE | WQ_MEM_RECLAIM, 0, dev->name);
	if (!wg->packet_crypt_wq)
		goto err_destroy_handshake_send;

	ret = wg_packet_queue_init(&wg->encrypt_queue, wg_packet_encrypt_worker,
				   MAX_QUEUED_PACKETS);
	if (ret < 0)
		goto err_destroy_packet_crypt;

	ret = wg_packet_queue_init(&wg->decrypt_queue, wg_packet_decrypt_worker,
				   MAX_QUEUED_PACKETS);
	if (ret < 0)
		goto err_free_encrypt_queue;

	ret = wg_packet_queue_init(&wg->handshake_queue, wg_packet_handshake_receive_worker,
				   MAX_QUEUED_INCOMING_HANDSHAKES);
	if (ret < 0)
		goto err_free_decrypt_queue;

	ret = wg_ratelimiter_init();
	if (ret < 0)
		goto err_free_handshake_queue;

	ret = register_netdevice(dev);
	if (ret < 0)
		goto err_uninit_ratelimiter;

	list_add(&wg->device_list, &device_list);

	/* We wait until the end to assign priv_destructor, so that
	 * register_netdevice doesn't call it for us if it fails.
	 */
	dev->priv_destructor = wg_destruct;

	pr_debug("%s: Interface created\n", dev->name);
	return ret;

err_uninit_ratelimiter:
	wg_ratelimiter_uninit();
err_free_handshake_queue:
	wg_packet_queue_free(&wg->handshake_queue, false);
err_free_decrypt_queue:
	wg_packet_queue_free(&wg->decrypt_queue, false);
err_free_encrypt_queue:
	wg_packet_queue_free(&wg->encrypt_queue, false);
err_destroy_packet_crypt:
	destroy_workqueue(wg->packet_crypt_wq);
err_destroy_handshake_send:
	destroy_workqueue(wg->handshake_send_wq);
err_destroy_handshake_receive:
	destroy_workqueue(wg->handshake_receive_wq);
err_free_tstats:
	free_percpu(dev->tstats);
err_free_index_hashtable:
	kvfree(wg->index_hashtable);
err_free_peer_hashtable:
	kvfree(wg->peer_hashtable);
	return ret;
}

static struct rtnl_link_ops link_ops __read_mostly = {
	.kind			= KBUILD_MODNAME,
	.priv_size		= sizeof(struct wg_device),
	.setup			= wg_setup,
	.newlink		= wg_newlink,
};

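/* When the namespace a device was created in is going away, drop the
 * device's socket and forget learned endpoint sources, so that nothing
 * belonging to the device still pins the exiting netns.
 */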
static void wg_netns_pre_exit(struct net *net)
{
	struct wg_device *wg;
	struct wg_peer *peer;

	rtnl_lock();
	list_for_each_entry(wg, &device_list, device_list) {
		if (rcu_access_pointer(wg->creating_net) == net) {
			pr_debug("%s: Creating namespace exiting\n", wg->dev->name);
			netif_carrier_off(wg->dev);
			mutex_lock(&wg->device_update_lock);
			rcu_assign_pointer(wg->creating_net, NULL);
			wg_socket_reinit(wg, NULL, NULL);
			list_for_each_entry(peer, &wg->peer_list, peer_list)
				wg_socket_clear_peer_endpoint_src(peer);
			mutex_unlock(&wg->device_update_lock);
		}
	}
	rtnl_unlock();
}

static struct pernet_operations pernet_ops = {
	.pre_exit = wg_netns_pre_exit
};

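/* Module init for the device layer: register the PM notifier (when
 * CONFIG_PM_SLEEP is set), the pernet hooks, and the rtnl link type that
 * lets "ip link add ... type wireguard" create interfaces.
 */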
int __init wg_device_init(void)
{
	int ret;

#ifdef CONFIG_PM_SLEEP
	ret = register_pm_notifier(&pm_notifier);
	if (ret)
		return ret;
#endif

	ret = register_pernet_device(&pernet_ops);
	if (ret)
		goto error_pm;

	ret = rtnl_link_register(&link_ops);
	if (ret)
		goto error_pernet;

	return 0;

error_pernet:
	unregister_pernet_device(&pernet_ops);
error_pm:
#ifdef CONFIG_PM_SLEEP
	unregister_pm_notifier(&pm_notifier);
#endif
	return ret;
}

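/* Module teardown: unregister everything in reverse order of init and wait
 * for outstanding RCU callbacks so no device or peer memory is in flight.
 */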
void wg_device_uninit(void)
{
	rtnl_link_unregister(&link_ops);
	unregister_pernet_device(&pernet_ops);
#ifdef CONFIG_PM_SLEEP
	unregister_pm_notifier(&pm_notifier);
#endif
	rcu_barrier();
}