Orange Pi 5 kernel

Deprecated Linux kernel 5.10.110 for OrangePi 5/5B/5+ boards

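drivers/net/wireguard/queueing.h
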
/* SPDX-License-Identifier: GPL-2.0 */
/*
 * Copyright (C) 2015-2019 Jason A. Donenfeld <Jason@zx2c4.com>. All Rights Reserved.
 */

#ifndef _WG_QUEUEING_H
#define _WG_QUEUEING_H

#include "peer.h"
#include <linux/types.h>
#include <linux/skbuff.h>
#include <linux/ip.h>
#include <linux/ipv6.h>
#include <net/ip_tunnels.h>

struct wg_device;
struct wg_peer;
struct multicore_worker;
struct crypt_queue;
struct prev_queue;
struct sk_buff;

/* queueing.c APIs: */
int wg_packet_queue_init(struct crypt_queue *queue, work_func_t function,
			 unsigned int len);
void wg_packet_queue_free(struct crypt_queue *queue, bool purge);
struct multicore_worker __percpu *
wg_packet_percpu_multicore_worker_alloc(work_func_t function, void *ptr);
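
/*
 * Illustrative sketch (not from this file): device setup pairs
 * wg_packet_queue_init() with wg_packet_queue_free(). The ring-size macro,
 * error label, and purge choice below are assumptions of the sketch:
 *
 *	ret = wg_packet_queue_init(&wg->encrypt_queue,
 *				   wg_packet_encrypt_worker,
 *				   MAX_QUEUED_PACKETS);
 *	if (ret < 0)
 *		goto err_free_decrypt_queue;
 *	...
 *	wg_packet_queue_free(&wg->encrypt_queue, false);
 *
 * Passing purge=false asserts the ring is already empty at teardown;
 * purge=true frees any skbs still queued.
 */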

/* receive.c APIs: */
void wg_packet_receive(struct wg_device *wg, struct sk_buff *skb);
void wg_packet_handshake_receive_worker(struct work_struct *work);
/* NAPI poll function: */
int wg_packet_rx_poll(struct napi_struct *napi, int budget);
/* Workqueue worker: */
void wg_packet_decrypt_worker(struct work_struct *work);

/* send.c APIs: */
void wg_packet_send_queued_handshake_initiation(struct wg_peer *peer,
						bool is_retry);
void wg_packet_send_handshake_response(struct wg_peer *peer);
void wg_packet_send_handshake_cookie(struct wg_device *wg,
				     struct sk_buff *initiating_skb,
				     __le32 sender_index);
void wg_packet_send_keepalive(struct wg_peer *peer);
void wg_packet_purge_staged_packets(struct wg_peer *peer);
void wg_packet_send_staged_packets(struct wg_peer *peer);
/* Workqueue workers: */
void wg_packet_handshake_send_worker(struct work_struct *work);
void wg_packet_tx_worker(struct work_struct *work);
void wg_packet_encrypt_worker(struct work_struct *work);

enum packet_state {
	PACKET_STATE_UNCRYPTED,
	PACKET_STATE_CRYPTED,
	PACKET_STATE_DEAD
};
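
/*
 * Lifecycle summary: a packet enters the queues as UNCRYPTED, a parallel
 * crypto worker moves it to CRYPTED on success or DEAD on failure, and the
 * serial per-peer consumer dequeues it only once atomic_read_acquire()
 * observes one of those terminal states, pairing with the
 * atomic_set_release() writers below. This preserves in-order delivery
 * while the crypto itself runs on many CPUs.
 */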

struct packet_cb {
	u64 nonce;
	struct noise_keypair *keypair;
	atomic_t state;
	u32 mtu;
	u8 ds;
};

#define PACKET_CB(skb) ((struct packet_cb *)((skb)->cb))
#define PACKET_PEER(skb) (PACKET_CB(skb)->keypair->entry.peer)
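
/*
 * Illustrative sketch: the transmit path stamps per-packet crypto metadata
 * into the 48-byte skb->cb area before queueing. The surrounding variables
 * (mtu, keypair, next_nonce) are assumptions of the sketch, not definitions
 * from this header:
 *
 *	PACKET_CB(skb)->mtu = mtu;
 *	PACKET_CB(skb)->keypair = keypair;
 *	PACKET_CB(skb)->nonce = next_nonce++;
 */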

static inline bool wg_check_packet_protocol(struct sk_buff *skb)
{
	__be16 real_protocol = ip_tunnel_parse_protocol(skb);
	return real_protocol && skb->protocol == real_protocol;
}
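
/*
 * Note: this guards against a mismatch between the address family the skb
 * claims (skb->protocol) and what the packet bytes actually parse as, so a
 * frame labelled IPv4 cannot smuggle IPv6 data past per-family checks, or
 * vice versa.
 */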

static inline void wg_reset_packet(struct sk_buff *skb, bool encapsulating)
{
	u8 l4_hash = skb->l4_hash;
	u8 sw_hash = skb->sw_hash;
	u32 hash = skb->hash;
	skb_scrub_packet(skb, true);
	memset(&skb->headers_start, 0,
	       offsetof(struct sk_buff, headers_end) -
		       offsetof(struct sk_buff, headers_start));
	if (encapsulating) {
		skb->l4_hash = l4_hash;
		skb->sw_hash = sw_hash;
		skb->hash = hash;
	}
	skb->queue_mapping = 0;
	skb->nohdr = 0;
	skb->peeked = 0;
	skb->mac_len = 0;
	skb->dev = NULL;
#ifdef CONFIG_NET_SCHED
	skb->tc_index = 0;
#endif
	skb_reset_redirect(skb);
	skb->hdr_len = skb_headroom(skb);
	skb_reset_mac_header(skb);
	skb_reset_network_header(skb);
	skb_reset_transport_header(skb);
	skb_probe_transport_header(skb);
	skb_reset_inner_headers(skb);
}
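
/*
 * Illustrative sketch: callers scrub the skb at the crypto boundary in both
 * directions, keeping the flow hash only when encapsulating so the encrypted
 * packet still steers to the same hardware queues as the inner flow:
 *
 *	wg_reset_packet(skb, true);	(after encrypt, before transmit)
 *	wg_reset_packet(skb, false);	(after decrypt, before handing to netif)
 */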

static inline int wg_cpumask_choose_online(int *stored_cpu, unsigned int id)
{
	unsigned int cpu = *stored_cpu, cpu_index, i;

	if (unlikely(cpu == nr_cpumask_bits ||
		     !cpumask_test_cpu(cpu, cpu_online_mask))) {
		cpu_index = id % cpumask_weight(cpu_online_mask);
		cpu = cpumask_first(cpu_online_mask);
		for (i = 0; i < cpu_index; ++i)
			cpu = cpumask_next(cpu, cpu_online_mask);
		*stored_cpu = cpu;
	}
	return cpu;
}

/* This function is racy, in the sense that next is unlocked, so it could return
 * the same CPU twice. A race-free version of this would be to instead store an
 * atomic sequence number, do an increment-and-return, and then iterate through
 * every possible CPU until we get to that index -- choose_cpu. However that's
 * a bit slower, and it doesn't seem like this potential race actually
 * introduces any performance loss, so we live with it.
 */
static inline int wg_cpumask_next_online(int *next)
{
	int cpu = *next;

	while (unlikely(!cpumask_test_cpu(cpu, cpu_online_mask)))
		cpu = cpumask_next(cpu, cpu_online_mask) % nr_cpumask_bits;
	*next = cpumask_next(cpu, cpu_online_mask) % nr_cpumask_bits;
	return cpu;
}
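
/*
 * Illustrative sketch (the cursor name is an assumption): a caller keeps a
 * persistent int per queue and receives an online CPU on each call,
 * round-robin, as wg_queue_enqueue_per_device_and_peer() does below:
 *
 *	static int next_cpu;
 *	int cpu = wg_cpumask_next_online(&next_cpu);
 *	queue_work_on(cpu, wq, &per_cpu_ptr(queue->worker, cpu)->work);
 */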

void wg_prev_queue_init(struct prev_queue *queue);

/* Multi producer */
bool wg_prev_queue_enqueue(struct prev_queue *queue, struct sk_buff *skb);

/* Single consumer */
struct sk_buff *wg_prev_queue_dequeue(struct prev_queue *queue);

/* Single consumer */
static inline struct sk_buff *wg_prev_queue_peek(struct prev_queue *queue)
{
	if (queue->peeked)
		return queue->peeked;
	queue->peeked = wg_prev_queue_dequeue(queue);
	return queue->peeked;
}

/* Single consumer */
static inline void wg_prev_queue_drop_peeked(struct prev_queue *queue)
{
	queue->peeked = NULL;
}
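
/*
 * Illustrative sketch of the single-consumer pattern (mirroring the rx/tx
 * workers in receive.c and send.c): peek, check the release-published state,
 * and only then drop the peeked entry, so a packet whose crypto has not
 * finished yet is retried on the next pass instead of being reordered:
 *
 *	while ((skb = wg_prev_queue_peek(&peer->rx_queue)) != NULL &&
 *	       (state = atomic_read_acquire(&PACKET_CB(skb)->state)) !=
 *			PACKET_STATE_UNCRYPTED) {
 *		wg_prev_queue_drop_peeked(&peer->rx_queue);
 *		if (likely(state == PACKET_STATE_CRYPTED))
 *			... deliver skb ...
 *		else
 *			... drop skb ...
 *	}
 */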

static inline int wg_queue_enqueue_per_device_and_peer(
	struct crypt_queue *device_queue, struct prev_queue *peer_queue,
	struct sk_buff *skb, struct workqueue_struct *wq, int *next_cpu)
{
	int cpu;

	atomic_set_release(&PACKET_CB(skb)->state, PACKET_STATE_UNCRYPTED);
	/* We first queue this up for the peer ingestion, but the consumer
	 * will wait for the state to change to CRYPTED or DEAD before
	 * dequeueing it.
	 */
	if (unlikely(!wg_prev_queue_enqueue(peer_queue, skb)))
		return -ENOSPC;

	/* Then we queue it up in the device queue, which consumes the
	 * packet as soon as it can.
	 */
	cpu = wg_cpumask_next_online(next_cpu);
	if (unlikely(ptr_ring_produce_bh(&device_queue->ring, skb)))
		return -EPIPE;
	queue_work_on(cpu, wq, &per_cpu_ptr(device_queue->worker, cpu)->work);
	return 0;
}
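
/*
 * Illustrative sketch of a caller (the per-device cursor name is an
 * assumption): -ENOSPC means the peer queue rejected the packet outright,
 * while -EPIPE means it already sits in the peer queue but missed the device
 * ring, so it must be marked DEAD to keep the serial consumer moving:
 *
 *	ret = wg_queue_enqueue_per_device_and_peer(&wg->encrypt_queue,
 *						   &peer->tx_queue, skb,
 *						   wg->packet_crypt_wq,
 *						   &encrypt_next_cpu);
 *	if (unlikely(ret == -EPIPE))
 *		wg_queue_enqueue_per_peer_tx(skb, PACKET_STATE_DEAD);
 */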

static inline void wg_queue_enqueue_per_peer_tx(struct sk_buff *skb, enum packet_state state)
{
	/* We take a reference, because as soon as we call atomic_set, the
	 * peer can be freed from below us.
	 */
	struct wg_peer *peer = wg_peer_get(PACKET_PEER(skb));

	atomic_set_release(&PACKET_CB(skb)->state, state);
	queue_work_on(wg_cpumask_choose_online(&peer->serial_work_cpu, peer->internal_id),
		      peer->device->packet_crypt_wq, &peer->transmit_packet_work);
	wg_peer_put(peer);
}

static inline void wg_queue_enqueue_per_peer_rx(struct sk_buff *skb, enum packet_state state)
{
	/* We take a reference, because as soon as we call atomic_set, the
	 * peer can be freed from below us.
	 */
	struct wg_peer *peer = wg_peer_get(PACKET_PEER(skb));

	atomic_set_release(&PACKET_CB(skb)->state, state);
	napi_schedule(&peer->napi);
	wg_peer_put(peer);
}

#ifdef DEBUG
bool wg_packet_counter_selftest(void);
#endif

#endif /* _WG_QUEUEING_H */