// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2015-2019 Jason A. Donenfeld <Jason@zx2c4.com>. All Rights Reserved.
 */

#include "queueing.h"
#include "timers.h"
#include "device.h"
#include "peer.h"
#include "socket.h"
#include "messages.h"
#include "cookie.h"

#include <linux/uio.h>
#include <linux/inetdevice.h>
#include <linux/socket.h>
#include <net/ip_tunnels.h>
#include <net/udp.h>
#include <net/sock.h>

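/* Send a handshake initiation to @peer, rate limited to at most one
 * initiation per REKEY_TIMEOUT. Callers go through
 * wg_packet_send_queued_handshake_initiation(), which queues this onto the
 * handshake workqueue.
 */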
static void wg_packet_send_handshake_initiation(struct wg_peer *peer)
{
        struct message_handshake_initiation packet;

        if (!wg_birthdate_has_expired(atomic64_read(&peer->last_sent_handshake),
                                      REKEY_TIMEOUT))
                return; /* This function is rate limited. */

        atomic64_set(&peer->last_sent_handshake, ktime_get_coarse_boottime_ns());
        net_dbg_ratelimited("%s: Sending handshake initiation to peer %llu (%pISpfsc)\n",
                            peer->device->dev->name, peer->internal_id,
                            &peer->endpoint.addr);

        if (wg_noise_handshake_create_initiation(&packet, &peer->handshake)) {
                wg_cookie_add_mac_to_packet(&packet, sizeof(packet), peer);
                wg_timers_any_authenticated_packet_traversal(peer);
                wg_timers_any_authenticated_packet_sent(peer);
                atomic64_set(&peer->last_sent_handshake,
                             ktime_get_coarse_boottime_ns());
                wg_socket_send_buffer_to_peer(peer, &packet, sizeof(packet),
                                              HANDSHAKE_DSCP);
                wg_timers_handshake_initiated(peer);
        }
}

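/* Work handler for peer->transmit_handshake_work. The peer reference taken
 * by wg_packet_send_queued_handshake_initiation() is dropped here once the
 * initiation has been sent (or rate limited away).
 */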
void wg_packet_handshake_send_worker(struct work_struct *work)
{
        struct wg_peer *peer = container_of(work, struct wg_peer,
                                            transmit_handshake_work);

        wg_packet_send_handshake_initiation(peer);
        wg_peer_put(peer);
}

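/* Schedule a handshake initiation for @peer on the handshake workqueue.
 * @is_retry distinguishes timer-driven retries from fresh attempts, so that
 * the retry counter is only reset for the latter.
 */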
void wg_packet_send_queued_handshake_initiation(struct wg_peer *peer,
                                                bool is_retry)
{
        if (!is_retry)
                peer->timer_handshake_attempts = 0;

        rcu_read_lock_bh();
        /* We check last_sent_handshake here in addition to the same check in
         * the function we're queueing up, so that we don't queue things if
         * not strictly necessary:
         */
        if (!wg_birthdate_has_expired(atomic64_read(&peer->last_sent_handshake),
                                      REKEY_TIMEOUT) ||
            unlikely(READ_ONCE(peer->is_dead)))
                goto out;

        wg_peer_get(peer);
        /* Queues up calling wg_packet_handshake_send_worker(peer), which does
         * a wg_peer_put(peer) when it's done:
         */
        if (!queue_work(peer->device->handshake_send_wq,
                        &peer->transmit_handshake_work))
                /* If the work was already queued, we want to drop the
                 * extra reference:
                 */
                wg_peer_put(peer);
out:
        rcu_read_unlock_bh();
}

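/* Send a handshake response for the most recently received initiation from
 * @peer. Unlike initiations, responses are not rate limited here, since they
 * are only generated in reaction to a valid initiation. The responder also
 * derives the transport keys immediately, because the response is the final
 * handshake message.
 */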
void wg_packet_send_handshake_response(struct wg_peer *peer)
{
        struct message_handshake_response packet;

        atomic64_set(&peer->last_sent_handshake, ktime_get_coarse_boottime_ns());
        net_dbg_ratelimited("%s: Sending handshake response to peer %llu (%pISpfsc)\n",
                            peer->device->dev->name, peer->internal_id,
                            &peer->endpoint.addr);

        if (wg_noise_handshake_create_response(&packet, &peer->handshake)) {
                wg_cookie_add_mac_to_packet(&packet, sizeof(packet), peer);
                if (wg_noise_handshake_begin_session(&peer->handshake,
                                                     &peer->keypairs)) {
                        wg_timers_session_derived(peer);
                        wg_timers_any_authenticated_packet_traversal(peer);
                        wg_timers_any_authenticated_packet_sent(peer);
                        atomic64_set(&peer->last_sent_handshake,
                                     ktime_get_coarse_boottime_ns());
                        wg_socket_send_buffer_to_peer(peer, &packet,
                                                      sizeof(packet),
                                                      HANDSHAKE_DSCP);
                }
        }
}

void wg_packet_send_handshake_cookie(struct wg_device *wg,
                                     struct sk_buff *initiating_skb,
                                     __le32 sender_index)
{
        struct message_handshake_cookie packet;

        net_dbg_skb_ratelimited("%s: Sending cookie response for denied handshake message for %pISpfsc\n",
                                wg->dev->name, initiating_skb);
        wg_cookie_message_create(&packet, initiating_skb, sender_index,
                                 &wg->cookie_checker);
        wg_socket_send_buffer_as_reply_to_skb(wg, initiating_skb, &packet,
                                              sizeof(packet));
}

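/* Initiate a rekey if the current sending key is getting stale: either too
 * many messages have been sent under it (REKEY_AFTER_MESSAGES) or, for the
 * original initiator only, too much time has passed (REKEY_AFTER_TIME).
 * Restricting the time-based trigger to the initiator avoids both sides
 * initiating simultaneously.
 */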
static void keep_key_fresh(struct wg_peer *peer)
{
        struct noise_keypair *keypair;
        bool send;

        rcu_read_lock_bh();
        keypair = rcu_dereference_bh(peer->keypairs.current_keypair);
        send = keypair && READ_ONCE(keypair->sending.is_valid) &&
               (atomic64_read(&keypair->sending_counter) > REKEY_AFTER_MESSAGES ||
                (keypair->i_am_the_initiator &&
                 wg_birthdate_has_expired(keypair->sending.birthdate, REKEY_AFTER_TIME)));
        rcu_read_unlock_bh();

        if (unlikely(send))
                wg_packet_send_queued_handshake_initiation(peer, false);
}

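/* Compute how many zero bytes of padding to append so that the plaintext
 * length becomes a multiple of MESSAGE_PADDING_MULTIPLE, without exceeding
 * the MTU. For example, with an MTU of 1420 and a padding multiple of 16, a
 * 1419-byte packet would align up to 1424; since that exceeds the MTU, the
 * padded size is clamped to 1420 and only 1 byte of padding is added.
 */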
static unsigned int calculate_skb_padding(struct sk_buff *skb)
{
        unsigned int padded_size, last_unit = skb->len;

        if (unlikely(!PACKET_CB(skb)->mtu))
                return ALIGN(last_unit, MESSAGE_PADDING_MULTIPLE) - last_unit;

        /* We do this modulo business with the MTU, just in case the networking
         * layer gives us a packet that's bigger than the MTU. In that case, we
         * wouldn't want the final subtraction to overflow in the case of the
         * padded_size being clamped. Fortunately, that's very rarely the case,
         * so we optimize for that not happening.
         */
        if (unlikely(last_unit > PACKET_CB(skb)->mtu))
                last_unit %= PACKET_CB(skb)->mtu;

        padded_size = min(PACKET_CB(skb)->mtu,
                          ALIGN(last_unit, MESSAGE_PADDING_MULTIPLE));
        return padded_size - last_unit;
}

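/* Encrypt one skb in place under @keypair. On success the skb carries a
 * complete WireGuard data message: the message_data header is pushed on the
 * front, zero padding and the 16-byte poly1305 authentication tag are
 * appended, and the payload is sealed with ChaCha20-Poly1305 using the
 * per-packet nonce assigned in wg_packet_send_staged_packets().
 */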
static bool encrypt_packet(struct sk_buff *skb, struct noise_keypair *keypair)
{
        unsigned int padding_len, plaintext_len, trailer_len;
        struct scatterlist sg[MAX_SKB_FRAGS + 8];
        struct message_data *header;
        struct sk_buff *trailer;
        int num_frags;

        /* Force hash calculation before encryption so that flow analysis is
         * consistent over the inner packet.
         */
        skb_get_hash(skb);

        /* Calculate lengths. */
        padding_len = calculate_skb_padding(skb);
        trailer_len = padding_len + noise_encrypted_len(0);
        plaintext_len = skb->len + padding_len;

        /* Expand data section to have room for padding and auth tag. */
        num_frags = skb_cow_data(skb, trailer_len, &trailer);
        if (unlikely(num_frags < 0 || num_frags > ARRAY_SIZE(sg)))
                return false;

        /* Set the padding to zeros, and make sure it and the auth tag are part
         * of the skb.
         */
        memset(skb_tail_pointer(trailer), 0, padding_len);

        /* Expand head section to have room for our header and the network
         * stack's headers.
         */
        if (unlikely(skb_cow_head(skb, DATA_PACKET_HEAD_ROOM) < 0))
                return false;

        /* Finalize checksum calculation for the inner packet, if required. */
        if (unlikely(skb->ip_summed == CHECKSUM_PARTIAL &&
                     skb_checksum_help(skb)))
                return false;

        /* Only after checksumming can we safely add on the padding at the end
         * and the header.
         */
        skb_set_inner_network_header(skb, 0);
        header = (struct message_data *)skb_push(skb, sizeof(*header));
        header->header.type = cpu_to_le32(MESSAGE_DATA);
        header->key_idx = keypair->remote_index;
        header->counter = cpu_to_le64(PACKET_CB(skb)->nonce);
        pskb_put(skb, trailer, trailer_len);

        /* Now we can encrypt the scatter-gather segments. */
        sg_init_table(sg, num_frags);
        if (skb_to_sgvec(skb, sg, sizeof(struct message_data),
                         noise_encrypted_len(plaintext_len)) <= 0)
                return false;
        return chacha20poly1305_encrypt_sg_inplace(sg, plaintext_len, NULL, 0,
                                                   PACKET_CB(skb)->nonce,
                                                   keypair->sending.key);
}

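/* A keepalive is simply an encrypted data message with a zero-length
 * payload. If nothing is already staged, allocate such an empty skb and
 * stage it, then flush the staged queue as usual.
 */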
void wg_packet_send_keepalive(struct wg_peer *peer)
{
        struct sk_buff *skb;

        if (skb_queue_empty(&peer->staged_packet_queue)) {
                skb = alloc_skb(DATA_PACKET_HEAD_ROOM + MESSAGE_MINIMUM_LENGTH,
                                GFP_ATOMIC);
                if (unlikely(!skb))
                        return;
                skb_reserve(skb, DATA_PACKET_HEAD_ROOM);
                skb->dev = peer->device->dev;
                PACKET_CB(skb)->mtu = skb->dev->mtu;
                skb_queue_tail(&peer->staged_packet_queue, skb);
                net_dbg_ratelimited("%s: Sending keepalive packet to peer %llu (%pISpfsc)\n",
                                    peer->device->dev->name, peer->internal_id,
                                    &peer->endpoint.addr);
        }

        wg_packet_send_staged_packets(peer);
}

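/* Transmit a list of freshly encrypted packets to @peer, updating the timers
 * that track authenticated traffic. Keepalives (zero-length data messages)
 * don't count as data for timer purposes.
 */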
static void wg_packet_create_data_done(struct wg_peer *peer, struct sk_buff *first)
{
        struct sk_buff *skb, *next;
        bool is_keepalive, data_sent = false;

        wg_timers_any_authenticated_packet_traversal(peer);
        wg_timers_any_authenticated_packet_sent(peer);
        skb_list_walk_safe(first, skb, next) {
                is_keepalive = skb->len == message_data_len(0);
                if (likely(!wg_socket_send_skb_to_peer(peer, skb,
                                PACKET_CB(skb)->ds) && !is_keepalive))
                        data_sent = true;
        }

        if (likely(data_sent))
                wg_timers_data_sent(peer);

        keep_key_fresh(peer);
}

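/* Work handler for peer->transmit_packet_work. Bundles come off the peer's
 * tx_queue strictly in the order they were enqueued, which preserves packet
 * ordering even though encryption runs in parallel across CPUs. The acquire
 * on the packet state pairs with the release with which
 * wg_queue_enqueue_per_peer_tx() publishes the encrypted contents.
 */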
void wg_packet_tx_worker(struct work_struct *work)
{
        struct wg_peer *peer = container_of(work, struct wg_peer, transmit_packet_work);
        struct noise_keypair *keypair;
        enum packet_state state;
        struct sk_buff *first;

        while ((first = wg_prev_queue_peek(&peer->tx_queue)) != NULL &&
               (state = atomic_read_acquire(&PACKET_CB(first)->state)) !=
                       PACKET_STATE_UNCRYPTED) {
                wg_prev_queue_drop_peeked(&peer->tx_queue);
                keypair = PACKET_CB(first)->keypair;

                if (likely(state == PACKET_STATE_CRYPTED))
                        wg_packet_create_data_done(peer, first);
                else
                        kfree_skb_list(first);

                wg_noise_keypair_put(keypair, false);
                wg_peer_put(peer);
                if (need_resched())
                        cond_resched();
        }
}

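/* Per-CPU encryption worker. Each ring entry is a bundle (an skb list)
 * sharing one keypair; if any skb in the bundle fails to encrypt, the whole
 * bundle is marked dead and the tx worker frees it.
 */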
void wg_packet_encrypt_worker(struct work_struct *work)
{
        struct crypt_queue *queue = container_of(work, struct multicore_worker,
                                                 work)->ptr;
        struct sk_buff *first, *skb, *next;

        while ((first = ptr_ring_consume_bh(&queue->ring)) != NULL) {
                enum packet_state state = PACKET_STATE_CRYPTED;

                skb_list_walk_safe(first, skb, next) {
                        if (likely(encrypt_packet(skb,
                                        PACKET_CB(first)->keypair))) {
                                wg_reset_packet(skb, true);
                        } else {
                                state = PACKET_STATE_DEAD;
                                break;
                        }
                }
                wg_queue_enqueue_per_peer_tx(first, state);
                if (need_resched())
                        cond_resched();
        }
}

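/* Hand a bundle of nonce-stamped packets to the encryption machinery:
 * enqueue it on the peer's serial tx_queue (to preserve ordering) and on the
 * device-wide encrypt queue, round-robining across online CPUs. On failure,
 * the keypair and peer references taken by the caller are dropped here.
 */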
static void wg_packet_create_data(struct wg_peer *peer, struct sk_buff *first)
{
        struct wg_device *wg = peer->device;
        int ret = -EINVAL;

        rcu_read_lock_bh();
        if (unlikely(READ_ONCE(peer->is_dead)))
                goto err;

        ret = wg_queue_enqueue_per_device_and_peer(&wg->encrypt_queue, &peer->tx_queue, first,
                                                   wg->packet_crypt_wq, &wg->encrypt_queue.last_cpu);
        if (unlikely(ret == -EPIPE))
                wg_queue_enqueue_per_peer_tx(first, PACKET_STATE_DEAD);
err:
        rcu_read_unlock_bh();
        if (likely(!ret || ret == -EPIPE))
                return;
        wg_noise_keypair_put(PACKET_CB(first)->keypair, false);
        wg_peer_put(peer);
        kfree_skb_list(first);
}

void wg_packet_purge_staged_packets(struct wg_peer *peer)
{
        spin_lock_bh(&peer->staged_packet_queue.lock);
        peer->device->dev->stats.tx_dropped += peer->staged_packet_queue.qlen;
        __skb_queue_purge(&peer->staged_packet_queue);
        spin_unlock_bh(&peer->staged_packet_queue.lock);
}

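/* Main transmit entry point: steal everything currently staged for @peer,
 * stamp each packet with a nonce under the current keypair, and hand the
 * bundle off for encryption. If no usable key exists, the packets are put
 * back on the staged queue and a handshake is initiated instead.
 */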
void wg_packet_send_staged_packets(struct wg_peer *peer)
{
        struct noise_keypair *keypair;
        struct sk_buff_head packets;
        struct sk_buff *skb;

        /* Steal the current queue into our local one. */
        __skb_queue_head_init(&packets);
        spin_lock_bh(&peer->staged_packet_queue.lock);
        skb_queue_splice_init(&peer->staged_packet_queue, &packets);
        spin_unlock_bh(&peer->staged_packet_queue.lock);
        if (unlikely(skb_queue_empty(&packets)))
                return;

        /* First we make sure we have a valid reference to a valid key. */
        rcu_read_lock_bh();
        keypair = wg_noise_keypair_get(
                rcu_dereference_bh(peer->keypairs.current_keypair));
        rcu_read_unlock_bh();
        if (unlikely(!keypair))
                goto out_nokey;
        if (unlikely(!READ_ONCE(keypair->sending.is_valid)))
                goto out_nokey;
        if (unlikely(wg_birthdate_has_expired(keypair->sending.birthdate,
                                              REJECT_AFTER_TIME)))
                goto out_invalid;

        /* After we know we have a somewhat valid key, we now try to assign
         * nonces to all of the packets in the queue. If we can't assign nonces
         * for all of them, we just consider it a failure and wait for the next
         * handshake.
         */
        skb_queue_walk(&packets, skb) {
                /* 0 for no outer TOS: no leak. TODO: at some later point, we
                 * might consider using flowi->tos as outer instead.
                 */
                PACKET_CB(skb)->ds = ip_tunnel_ecn_encap(0, ip_hdr(skb), skb);
                PACKET_CB(skb)->nonce =
                        atomic64_inc_return(&keypair->sending_counter) - 1;
                if (unlikely(PACKET_CB(skb)->nonce >= REJECT_AFTER_MESSAGES))
                        goto out_invalid;
        }

        packets.prev->next = NULL;
        wg_peer_get(keypair->entry.peer);
        PACKET_CB(packets.next)->keypair = keypair;
        wg_packet_create_data(peer, packets.next);
        return;

out_invalid:
        WRITE_ONCE(keypair->sending.is_valid, false);
out_nokey:
        wg_noise_keypair_put(keypair, false);

        /* We orphan the packets if we're waiting on a handshake, so that they
         * don't block a socket's pool.
         */
        skb_queue_walk(&packets, skb)
                skb_orphan(skb);
        /* Then we put them back on the top of the queue. We're not too
         * concerned about accidentally getting things a little out of order if
         * packets are being added really fast, because this queue is for before
         * packets can even be sent and it's small anyway.
         */
        spin_lock_bh(&peer->staged_packet_queue.lock);
        skb_queue_splice(&packets, &peer->staged_packet_queue);
        spin_unlock_bh(&peer->staged_packet_queue.lock);

        /* If we're exiting because there's something wrong with the key, it
         * means we should initiate a new handshake.
         */
        wg_packet_send_queued_handshake_initiation(peer, false);
}