Orange Pi 5 kernel

Deprecated Linux kernel 5.10.110 for OrangePi 5/5B/5+ boards

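drivers/net/wireguard/receive.c (the WireGuard receive path):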
// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2015-2019 Jason A. Donenfeld <Jason@zx2c4.com>. All Rights Reserved.
 */

#include "queueing.h"
#include "device.h"
#include "peer.h"
#include "timers.h"
#include "messages.h"
#include "cookie.h"
#include "socket.h"

#include <linux/ip.h>
#include <linux/ipv6.h>
#include <linux/udp.h>
#include <net/ip_tunnels.h>

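/* Bump both the per-peer byte count and the device-wide per-CPU tstats;
 * get_cpu_ptr()/put_cpu_ptr() pin the current CPU for the u64_stats
 * sequence so the 64-bit counters stay consistent on 32-bit machines.
 */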
/* Must be called with bh disabled. */
static void update_rx_stats(struct wg_peer *peer, size_t len)
{
	struct pcpu_sw_netstats *tstats =
		get_cpu_ptr(peer->device->dev->tstats);

	u64_stats_update_begin(&tstats->syncp);
	++tstats->rx_packets;
	tstats->rx_bytes += len;
	peer->rx_bytes += len;
	u64_stats_update_end(&tstats->syncp);
	put_cpu_ptr(tstats);
}

#define SKB_TYPE_LE32(skb) (((struct message_header *)(skb)->data)->type)

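/* Map the little-endian message type at the front of the skb to its
 * expected header length. Data packets need only be at least
 * MESSAGE_MINIMUM_LENGTH, while each handshake message type must match its
 * struct size exactly; 0 means the packet is too short or of unknown type.
 */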
static size_t validate_header_len(struct sk_buff *skb)
{
	if (unlikely(skb->len < sizeof(struct message_header)))
		return 0;
	if (SKB_TYPE_LE32(skb) == cpu_to_le32(MESSAGE_DATA) &&
	    skb->len >= MESSAGE_MINIMUM_LENGTH)
		return sizeof(struct message_data);
	if (SKB_TYPE_LE32(skb) == cpu_to_le32(MESSAGE_HANDSHAKE_INITIATION) &&
	    skb->len == sizeof(struct message_handshake_initiation))
		return sizeof(struct message_handshake_initiation);
	if (SKB_TYPE_LE32(skb) == cpu_to_le32(MESSAGE_HANDSHAKE_RESPONSE) &&
	    skb->len == sizeof(struct message_handshake_response))
		return sizeof(struct message_handshake_response);
	if (SKB_TYPE_LE32(skb) == cpu_to_le32(MESSAGE_HANDSHAKE_COOKIE) &&
	    skb->len == sizeof(struct message_handshake_cookie))
		return sizeof(struct message_handshake_cookie);
	return 0;
}

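/* Validate the outer IP and UDP headers, trim the skb to the length the
 * UDP header claims, and pull skb->data up to the WireGuard message
 * header, checking that the full header for the claimed message type is
 * present. Returns 0 on success and -EINVAL on anything bogus.
 */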
static int prepare_skb_header(struct sk_buff *skb, struct wg_device *wg)
{
	size_t data_offset, data_len, header_len;
	struct udphdr *udp;

	if (unlikely(!wg_check_packet_protocol(skb) ||
		     skb_transport_header(skb) < skb->head ||
		     (skb_transport_header(skb) + sizeof(struct udphdr)) >
			     skb_tail_pointer(skb)))
		return -EINVAL; /* Bogus IP header */
	udp = udp_hdr(skb);
	data_offset = (u8 *)udp - skb->data;
	if (unlikely(data_offset > U16_MAX ||
		     data_offset + sizeof(struct udphdr) > skb->len))
		/* Packet has offset at impossible location or isn't big enough
		 * to have UDP fields.
		 */
		return -EINVAL;
	data_len = ntohs(udp->len);
	if (unlikely(data_len < sizeof(struct udphdr) ||
		     data_len > skb->len - data_offset))
		/* UDP packet is reporting too small of a size or lying about
		 * its size.
		 */
		return -EINVAL;
	data_len -= sizeof(struct udphdr);
	data_offset = (u8 *)udp + sizeof(struct udphdr) - skb->data;
	if (unlikely(!pskb_may_pull(skb,
				data_offset + sizeof(struct message_header)) ||
		     pskb_trim(skb, data_len + data_offset) < 0))
		return -EINVAL;
	skb_pull(skb, data_offset);
	if (unlikely(skb->len != data_len))
		/* Final len does not agree with calculated len */
		return -EINVAL;
	header_len = validate_header_len(skb);
	if (unlikely(!header_len))
		return -EINVAL;
	__skb_push(skb, data_offset);
	if (unlikely(!pskb_may_pull(skb, data_offset + header_len)))
		return -EINVAL;
	__skb_pull(skb, data_offset);
	return 0;
}

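/* Process one queued handshake message. Cookie replies are consumed right
 * here; initiations and responses first have their MACs checked (when the
 * queue indicates we're under load, a valid-MAC packet without a cookie is
 * answered with only a cookie reply, deferring the expensive Curve25519
 * work), and are then fed into the Noise handshake state machine.
 */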
static void wg_receive_handshake_packet(struct wg_device *wg,
					struct sk_buff *skb)
{
	enum cookie_mac_state mac_state;
	struct wg_peer *peer = NULL;
	/* This is global, so that our load calculation applies to the whole
	 * system. We don't care about races with it at all.
	 */
	static u64 last_under_load;
	bool packet_needs_cookie;
	bool under_load;

	if (SKB_TYPE_LE32(skb) == cpu_to_le32(MESSAGE_HANDSHAKE_COOKIE)) {
		net_dbg_skb_ratelimited("%s: Receiving cookie response from %pISpfsc\n",
					wg->dev->name, skb);
		wg_cookie_message_consume(
			(struct message_handshake_cookie *)skb->data, wg);
		return;
	}

	under_load = atomic_read(&wg->handshake_queue_len) >=
			MAX_QUEUED_INCOMING_HANDSHAKES / 8;
	if (under_load) {
		last_under_load = ktime_get_coarse_boottime_ns();
	} else if (last_under_load) {
		under_load = !wg_birthdate_has_expired(last_under_load, 1);
		if (!under_load)
			last_under_load = 0;
	}
	mac_state = wg_cookie_validate_packet(&wg->cookie_checker, skb,
					      under_load);
	if ((under_load && mac_state == VALID_MAC_WITH_COOKIE) ||
	    (!under_load && mac_state == VALID_MAC_BUT_NO_COOKIE)) {
		packet_needs_cookie = false;
	} else if (under_load && mac_state == VALID_MAC_BUT_NO_COOKIE) {
		packet_needs_cookie = true;
	} else {
		net_dbg_skb_ratelimited("%s: Invalid MAC of handshake, dropping packet from %pISpfsc\n",
					wg->dev->name, skb);
		return;
	}

	switch (SKB_TYPE_LE32(skb)) {
	case cpu_to_le32(MESSAGE_HANDSHAKE_INITIATION): {
		struct message_handshake_initiation *message =
			(struct message_handshake_initiation *)skb->data;

		if (packet_needs_cookie) {
			wg_packet_send_handshake_cookie(wg, skb,
							message->sender_index);
			return;
		}
		peer = wg_noise_handshake_consume_initiation(message, wg);
		if (unlikely(!peer)) {
			net_dbg_skb_ratelimited("%s: Invalid handshake initiation from %pISpfsc\n",
						wg->dev->name, skb);
			return;
		}
		wg_socket_set_peer_endpoint_from_skb(peer, skb);
		net_dbg_ratelimited("%s: Receiving handshake initiation from peer %llu (%pISpfsc)\n",
				    wg->dev->name, peer->internal_id,
				    &peer->endpoint.addr);
		wg_packet_send_handshake_response(peer);
		break;
	}
	case cpu_to_le32(MESSAGE_HANDSHAKE_RESPONSE): {
		struct message_handshake_response *message =
			(struct message_handshake_response *)skb->data;

		if (packet_needs_cookie) {
			wg_packet_send_handshake_cookie(wg, skb,
							message->sender_index);
			return;
		}
		peer = wg_noise_handshake_consume_response(message, wg);
		if (unlikely(!peer)) {
			net_dbg_skb_ratelimited("%s: Invalid handshake response from %pISpfsc\n",
						wg->dev->name, skb);
			return;
		}
		wg_socket_set_peer_endpoint_from_skb(peer, skb);
		net_dbg_ratelimited("%s: Receiving handshake response from peer %llu (%pISpfsc)\n",
				    wg->dev->name, peer->internal_id,
				    &peer->endpoint.addr);
		if (wg_noise_handshake_begin_session(&peer->handshake,
						     &peer->keypairs)) {
			wg_timers_session_derived(peer);
			wg_timers_handshake_complete(peer);
			/* Calling this function will either send any existing
			 * packets in the queue and not send a keepalive, which
			 * is the best case, or, if there's nothing in the
			 * queue, it will send a keepalive, in order to give
			 * immediate confirmation of the session.
			 */
			wg_packet_send_keepalive(peer);
		}
		break;
	}
	}

	if (unlikely(!peer)) {
		WARN(1, "Somehow a wrong type of packet wound up in the handshake queue!\n");
		return;
	}

	local_bh_disable();
	update_rx_stats(peer, skb->len);
	local_bh_enable();

	wg_timers_any_authenticated_packet_received(peer);
	wg_timers_any_authenticated_packet_traversal(peer);
	wg_peer_put(peer);
}

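/* Workqueue item that drains the device-wide handshake ring. Handshake
 * processing involves expensive public-key cryptography, so it runs here
 * on handshake_receive_wq instead of in the UDP receive path itself.
 */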
void wg_packet_handshake_receive_worker(struct work_struct *work)
{
	struct crypt_queue *queue = container_of(work, struct multicore_worker, work)->ptr;
	struct wg_device *wg = container_of(queue, struct wg_device, handshake_queue);
	struct sk_buff *skb;

	while ((skb = ptr_ring_consume_bh(&queue->ring)) != NULL) {
		wg_receive_handshake_packet(wg, skb);
		dev_kfree_skb(skb);
		atomic_dec(&wg->handshake_queue_len);
		cond_resched();
	}
}

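/* If we were the initiator of the current keypair and it is within
 * KEEPALIVE_TIMEOUT + REKEY_TIMEOUT of REJECT_AFTER_TIME, send one
 * opportunistic handshake initiation so the session can rekey before the
 * old key is rejected.
 */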
static void keep_key_fresh(struct wg_peer *peer)
{
	struct noise_keypair *keypair;
	bool send;

	if (peer->sent_lastminute_handshake)
		return;

	rcu_read_lock_bh();
	keypair = rcu_dereference_bh(peer->keypairs.current_keypair);
	send = keypair && READ_ONCE(keypair->sending.is_valid) &&
	       keypair->i_am_the_initiator &&
	       wg_birthdate_has_expired(keypair->sending.birthdate,
			REJECT_AFTER_TIME - KEEPALIVE_TIMEOUT - REKEY_TIMEOUT);
	rcu_read_unlock_bh();

	if (unlikely(send)) {
		peer->sent_lastminute_handshake = true;
		wg_packet_send_queued_handshake_initiation(peer, false);
	}
}

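/* Decrypt a data packet in place with ChaCha20-Poly1305, using the wire
 * counter as the nonce. The offset dance around skb_cow_data() keeps the
 * network header inside the skb so the endpoint can still be extracted
 * later, and the 16-byte Poly1305 tag is trimmed off on success.
 */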
static bool decrypt_packet(struct sk_buff *skb, struct noise_keypair *keypair)
{
	struct scatterlist sg[MAX_SKB_FRAGS + 8];
	struct sk_buff *trailer;
	unsigned int offset;
	int num_frags;

	if (unlikely(!keypair))
		return false;

	if (unlikely(!READ_ONCE(keypair->receiving.is_valid) ||
		  wg_birthdate_has_expired(keypair->receiving.birthdate, REJECT_AFTER_TIME) ||
		  keypair->receiving_counter.counter >= REJECT_AFTER_MESSAGES)) {
		WRITE_ONCE(keypair->receiving.is_valid, false);
		return false;
	}

	PACKET_CB(skb)->nonce =
		le64_to_cpu(((struct message_data *)skb->data)->counter);

	/* We ensure that the network header is part of the packet before we
	 * call skb_cow_data, so that there's no chance that data is removed
	 * from the skb, so that later we can extract the original endpoint.
	 */
	offset = skb->data - skb_network_header(skb);
	skb_push(skb, offset);
	num_frags = skb_cow_data(skb, 0, &trailer);
	offset += sizeof(struct message_data);
	skb_pull(skb, offset);
	if (unlikely(num_frags < 0 || num_frags > ARRAY_SIZE(sg)))
		return false;

	sg_init_table(sg, num_frags);
	if (skb_to_sgvec(skb, sg, 0, skb->len) <= 0)
		return false;

	if (!chacha20poly1305_decrypt_sg_inplace(sg, skb->len, NULL, 0,
						 PACKET_CB(skb)->nonce,
						 keypair->receiving.key))
		return false;

	/* Another ugly situation of pushing and pulling the header so as to
	 * keep endpoint information intact.
	 */
	skb_push(skb, offset);
	if (pskb_trim(skb, skb->len - noise_encrypted_len(0)))
		return false;
	skb_pull(skb, offset);

	return true;
}

/* This is RFC6479, a replay detection bitmap algorithm that avoids bitshifts */
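/* The window is a ring of COUNTER_BITS_TOTAL bits held in unsigned longs.
 * counter->counter records one past the highest sequence number accepted:
 * a newer packet slides the window forward by zeroing the words between
 * the old and new positions, a packet older than the window is rejected,
 * and anything in between is accepted only if its bit wasn't already set.
 */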
static bool counter_validate(struct noise_replay_counter *counter, u64 their_counter)
{
	unsigned long index, index_current, top, i;
	bool ret = false;

	spin_lock_bh(&counter->lock);

	if (unlikely(counter->counter >= REJECT_AFTER_MESSAGES + 1 ||
		     their_counter >= REJECT_AFTER_MESSAGES))
		goto out;

	++their_counter;

	if (unlikely((COUNTER_WINDOW_SIZE + their_counter) <
		     counter->counter))
		goto out;

	index = their_counter >> ilog2(BITS_PER_LONG);

	if (likely(their_counter > counter->counter)) {
		index_current = counter->counter >> ilog2(BITS_PER_LONG);
		top = min_t(unsigned long, index - index_current,
			    COUNTER_BITS_TOTAL / BITS_PER_LONG);
		for (i = 1; i <= top; ++i)
			counter->backtrack[(i + index_current) &
				((COUNTER_BITS_TOTAL / BITS_PER_LONG) - 1)] = 0;
		counter->counter = their_counter;
	}

	index &= (COUNTER_BITS_TOTAL / BITS_PER_LONG) - 1;
	ret = !test_and_set_bit(their_counter & (BITS_PER_LONG - 1),
				&counter->backtrack[index]);

out:
	spin_unlock_bh(&counter->lock);
	return ret;
}

#include "selftest/counter.c"

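/* Post-decryption bookkeeping for a data packet: pin down the peer's
 * endpoint, poke the timers, and confirm the inner packet parses as IPv4
 * or IPv6 whose source address falls within the sending peer's allowed
 * IPs (cryptokey routing) before handing it to the stack via
 * napi_gro_receive().
 */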
static void wg_packet_consume_data_done(struct wg_peer *peer,
					struct sk_buff *skb,
					struct endpoint *endpoint)
{
	struct net_device *dev = peer->device->dev;
	unsigned int len, len_before_trim;
	struct wg_peer *routed_peer;

	wg_socket_set_peer_endpoint(peer, endpoint);

	if (unlikely(wg_noise_received_with_keypair(&peer->keypairs,
						    PACKET_CB(skb)->keypair))) {
		wg_timers_handshake_complete(peer);
		wg_packet_send_staged_packets(peer);
	}

	keep_key_fresh(peer);

	wg_timers_any_authenticated_packet_received(peer);
	wg_timers_any_authenticated_packet_traversal(peer);

	/* A packet with length 0 is a keepalive packet */
	if (unlikely(!skb->len)) {
		update_rx_stats(peer, message_data_len(0));
		net_dbg_ratelimited("%s: Receiving keepalive packet from peer %llu (%pISpfsc)\n",
				    dev->name, peer->internal_id,
				    &peer->endpoint.addr);
		goto packet_processed;
	}

	wg_timers_data_received(peer);

	if (unlikely(skb_network_header(skb) < skb->head))
		goto dishonest_packet_size;
	if (unlikely(!(pskb_network_may_pull(skb, sizeof(struct iphdr)) &&
		       (ip_hdr(skb)->version == 4 ||
			(ip_hdr(skb)->version == 6 &&
			 pskb_network_may_pull(skb, sizeof(struct ipv6hdr)))))))
		goto dishonest_packet_type;

	skb->dev = dev;
	/* We've already verified the Poly1305 auth tag, which means this packet
	 * was not modified in transit. We can therefore tell the networking
	 * stack that all checksums of every layer of encapsulation have already
	 * been checked "by the hardware", so it is unnecessary to check them
	 * again in software.
	 */
	skb->ip_summed = CHECKSUM_UNNECESSARY;
	skb->csum_level = ~0; /* All levels */
	skb->protocol = ip_tunnel_parse_protocol(skb);
	if (skb->protocol == htons(ETH_P_IP)) {
		len = ntohs(ip_hdr(skb)->tot_len);
		if (unlikely(len < sizeof(struct iphdr)))
			goto dishonest_packet_size;
		INET_ECN_decapsulate(skb, PACKET_CB(skb)->ds, ip_hdr(skb)->tos);
	} else if (skb->protocol == htons(ETH_P_IPV6)) {
		len = ntohs(ipv6_hdr(skb)->payload_len) +
		      sizeof(struct ipv6hdr);
		INET_ECN_decapsulate(skb, PACKET_CB(skb)->ds, ipv6_get_dsfield(ipv6_hdr(skb)));
	} else {
		goto dishonest_packet_type;
	}

	if (unlikely(len > skb->len))
		goto dishonest_packet_size;
	len_before_trim = skb->len;
	if (unlikely(pskb_trim(skb, len)))
		goto packet_processed;

	routed_peer = wg_allowedips_lookup_src(&peer->device->peer_allowedips,
					       skb);
	wg_peer_put(routed_peer); /* We don't need the extra reference. */

	if (unlikely(routed_peer != peer))
		goto dishonest_packet_peer;

	napi_gro_receive(&peer->napi, skb);
	update_rx_stats(peer, message_data_len(len_before_trim));
	return;

dishonest_packet_peer:
	net_dbg_skb_ratelimited("%s: Packet has unallowed src IP (%pISc) from peer %llu (%pISpfsc)\n",
				dev->name, skb, peer->internal_id,
				&peer->endpoint.addr);
	++dev->stats.rx_errors;
	++dev->stats.rx_frame_errors;
	goto packet_processed;
dishonest_packet_type:
	net_dbg_ratelimited("%s: Packet is neither ipv4 nor ipv6 from peer %llu (%pISpfsc)\n",
			    dev->name, peer->internal_id, &peer->endpoint.addr);
	++dev->stats.rx_errors;
	++dev->stats.rx_frame_errors;
	goto packet_processed;
dishonest_packet_size:
	net_dbg_ratelimited("%s: Packet has incorrect size from peer %llu (%pISpfsc)\n",
			    dev->name, peer->internal_id, &peer->endpoint.addr);
	++dev->stats.rx_errors;
	++dev->stats.rx_length_errors;
	goto packet_processed;
packet_processed:
	dev_kfree_skb(skb);
}

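/* Per-peer NAPI poll handler. Packets leave the peer's rx_queue in arrival
 * order, but only once the decrypt workers have marked them CRYPTED or
 * DEAD, so delivery stays in-order even though decryption is parallel.
 * Replay protection (counter_validate) runs here, serialized per peer,
 * rather than in the workers.
 */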
int wg_packet_rx_poll(struct napi_struct *napi, int budget)
{
	struct wg_peer *peer = container_of(napi, struct wg_peer, napi);
	struct noise_keypair *keypair;
	struct endpoint endpoint;
	enum packet_state state;
	struct sk_buff *skb;
	int work_done = 0;
	bool free;

	if (unlikely(budget <= 0))
		return 0;

	while ((skb = wg_prev_queue_peek(&peer->rx_queue)) != NULL &&
	       (state = atomic_read_acquire(&PACKET_CB(skb)->state)) !=
		       PACKET_STATE_UNCRYPTED) {
		wg_prev_queue_drop_peeked(&peer->rx_queue);
		keypair = PACKET_CB(skb)->keypair;
		free = true;

		if (unlikely(state != PACKET_STATE_CRYPTED))
			goto next;

		if (unlikely(!counter_validate(&keypair->receiving_counter,
					       PACKET_CB(skb)->nonce))) {
			net_dbg_ratelimited("%s: Packet has invalid nonce %llu (max %llu)\n",
					    peer->device->dev->name,
					    PACKET_CB(skb)->nonce,
					    keypair->receiving_counter.counter);
			goto next;
		}

		if (unlikely(wg_socket_endpoint_from_skb(&endpoint, skb)))
			goto next;

		wg_reset_packet(skb, false);
		wg_packet_consume_data_done(peer, skb, &endpoint);
		free = false;

next:
		wg_noise_keypair_put(keypair, false);
		wg_peer_put(peer);
		if (unlikely(free))
			dev_kfree_skb(skb);

		if (++work_done >= budget)
			break;
	}

	if (work_done < budget)
		napi_complete_done(napi, work_done);

	return work_done;
}

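/* Workqueue item for the per-device decrypt queue: decrypt each packet and
 * publish its state, at which point the per-peer NAPI poller above may
 * release it in its original order.
 */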
void wg_packet_decrypt_worker(struct work_struct *work)
{
	struct crypt_queue *queue = container_of(work, struct multicore_worker,
						 work)->ptr;
	struct sk_buff *skb;

	while ((skb = ptr_ring_consume_bh(&queue->ring)) != NULL) {
		enum packet_state state =
			likely(decrypt_packet(skb, PACKET_CB(skb)->keypair)) ?
				PACKET_STATE_CRYPTED : PACKET_STATE_DEAD;
		wg_queue_enqueue_per_peer_rx(skb, state);
		if (need_resched())
			cond_resched();
	}
}

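/* Entry point for data packets: the receiver index from the message header
 * selects the keypair (and thus the peer); references are taken on both
 * before the packet is queued for parallel decryption and per-peer
 * in-order delivery.
 */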
static void wg_packet_consume_data(struct wg_device *wg, struct sk_buff *skb)
{
	__le32 idx = ((struct message_data *)skb->data)->key_idx;
	struct wg_peer *peer = NULL;
	int ret;

	rcu_read_lock_bh();
	PACKET_CB(skb)->keypair =
		(struct noise_keypair *)wg_index_hashtable_lookup(
			wg->index_hashtable, INDEX_HASHTABLE_KEYPAIR, idx,
			&peer);
	if (unlikely(!wg_noise_keypair_get(PACKET_CB(skb)->keypair)))
		goto err_keypair;

	if (unlikely(READ_ONCE(peer->is_dead)))
		goto err;

	ret = wg_queue_enqueue_per_device_and_peer(&wg->decrypt_queue, &peer->rx_queue, skb,
						   wg->packet_crypt_wq, &wg->decrypt_queue.last_cpu);
	if (unlikely(ret == -EPIPE))
		wg_queue_enqueue_per_peer_rx(skb, PACKET_STATE_DEAD);
	if (likely(!ret || ret == -EPIPE)) {
		rcu_read_unlock_bh();
		return;
	}
err:
	wg_noise_keypair_put(PACKET_CB(skb)->keypair, false);
err_keypair:
	rcu_read_unlock_bh();
	wg_peer_put(peer);
	dev_kfree_skb(skb);
}

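/* Receives every UDP packet destined to our listen port, via the socket's
 * encap_rcv hook (see socket.c). Handshake messages are queued onto the
 * handshake ring, dropped early if the RNG isn't yet seeded or if the
 * queue is past half full and the producer lock is contended, while data
 * messages go straight to wg_packet_consume_data().
 */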
void wg_packet_receive(struct wg_device *wg, struct sk_buff *skb)
{
	if (unlikely(prepare_skb_header(skb, wg) < 0))
		goto err;
	switch (SKB_TYPE_LE32(skb)) {
	case cpu_to_le32(MESSAGE_HANDSHAKE_INITIATION):
	case cpu_to_le32(MESSAGE_HANDSHAKE_RESPONSE):
	case cpu_to_le32(MESSAGE_HANDSHAKE_COOKIE): {
		int cpu, ret = -EBUSY;

		if (unlikely(!rng_is_initialized()))
			goto drop;
		if (atomic_read(&wg->handshake_queue_len) > MAX_QUEUED_INCOMING_HANDSHAKES / 2) {
			if (spin_trylock_bh(&wg->handshake_queue.ring.producer_lock)) {
				ret = __ptr_ring_produce(&wg->handshake_queue.ring, skb);
				spin_unlock_bh(&wg->handshake_queue.ring.producer_lock);
			}
		} else
			ret = ptr_ring_produce_bh(&wg->handshake_queue.ring, skb);
		if (ret) {
	drop:
			net_dbg_skb_ratelimited("%s: Dropping handshake packet from %pISpfsc\n",
						wg->dev->name, skb);
			goto err;
		}
		atomic_inc(&wg->handshake_queue_len);
		cpu = wg_cpumask_next_online(&wg->handshake_queue.last_cpu);
		/* Queues up a call to wg_packet_handshake_receive_worker(): */
		queue_work_on(cpu, wg->handshake_receive_wq,
			      &per_cpu_ptr(wg->handshake_queue.worker, cpu)->work);
		break;
	}
	case cpu_to_le32(MESSAGE_DATA):
		PACKET_CB(skb)->ds = ip_tunnel_get_dsfield(ip_hdr(skb), skb);
		wg_packet_consume_data(wg, skb);
		break;
	default:
		WARN(1, "Non-exhaustive parsing of packet header led to unknown packet type!\n");
		goto err;
	}
	return;

err:
	dev_kfree_skb(skb);
}