// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2015-2019 Jason A. Donenfeld <Jason@zx2c4.com>. All Rights Reserved.
 */

#include "queueing.h"
#include <linux/skb_array.h>

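/*
 * Allocate a struct multicore_worker per possible CPU, each initialized to
 * run @function with @ptr as its context. Returns NULL if the per-CPU
 * allocation fails.
 */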
struct multicore_worker __percpu *
wg_packet_percpu_multicore_worker_alloc(work_func_t function, void *ptr)
{
        int cpu;
        struct multicore_worker __percpu *worker = alloc_percpu(struct multicore_worker);

        if (!worker)
                return NULL;

        for_each_possible_cpu(cpu) {
                per_cpu_ptr(worker, cpu)->ptr = ptr;
                INIT_WORK(&per_cpu_ptr(worker, cpu)->work, function);
        }
        return worker;
}

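/*
 * Initialize a crypt_queue: a multi-producer ptr_ring of @len slots, drained
 * by per-CPU workers that run @function with the queue itself as context.
 * Returns 0 on success or a negative errno, cleaning up the ring on failure.
 *
 * An illustrative pairing with wg_packet_queue_free() (the caller's queue
 * field and worker function names here are hypothetical):
 *
 *      ret = wg_packet_queue_init(&wg->encrypt_queue, encrypt_worker,
 *                                 MAX_QUEUED_PACKETS);
 *      ...
 *      wg_packet_queue_free(&wg->encrypt_queue, true);
 */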
int wg_packet_queue_init(struct crypt_queue *queue, work_func_t function,
                         unsigned int len)
{
        int ret;

        memset(queue, 0, sizeof(*queue));
        ret = ptr_ring_init(&queue->ring, len, GFP_KERNEL);
        if (ret)
                return ret;
        queue->worker = wg_packet_percpu_multicore_worker_alloc(function, queue);
        if (!queue->worker) {
                ptr_ring_cleanup(&queue->ring, NULL);
                return -ENOMEM;
        }
        return 0;
}

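/*
 * Tear down a crypt_queue. When @purge is false the ring is expected to be
 * empty already (warned on otherwise); when true, any skbs still queued are
 * freed via __skb_array_destroy_skb().
 */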
void wg_packet_queue_free(struct crypt_queue *queue, bool purge)
{
        free_percpu(queue->worker);
        WARN_ON(!purge && !__ptr_ring_empty(&queue->ring));
        ptr_ring_cleanup(&queue->ring, purge ? __skb_array_destroy_skb : NULL);
}

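/*
 * The prev_queue below is an intrusive multi-producer, single-consumer queue.
 * It reuses the skb's prev pointer as the "next" link (NEXT()) and embeds a
 * stub element (queue->empty) so the list is never truly empty; STUB() casts
 * that stub to an sk_buff so it can sit in the same linked structure.
 * Producers call wg_prev_queue_enqueue() concurrently, while a single
 * consumer calls wg_prev_queue_dequeue().
 */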
#define NEXT(skb) ((skb)->prev)
#define STUB(queue) ((struct sk_buff *)&queue->empty)

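/*
 * Point both head and tail at the stub and zero the count. The BUILD_BUG_ON
 * checks that prev_queue::empty lays out its next/prev members exactly like
 * sk_buff does, which is what makes the STUB() cast above legitimate.
 */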
void wg_prev_queue_init(struct prev_queue *queue)
{
        NEXT(STUB(queue)) = NULL;
        queue->head = queue->tail = STUB(queue);
        queue->peeked = NULL;
        atomic_set(&queue->count, 0);
        BUILD_BUG_ON(
                offsetof(struct sk_buff, next) != offsetof(struct prev_queue, empty.next) -
                                                  offsetof(struct prev_queue, empty) ||
                offsetof(struct sk_buff, prev) != offsetof(struct prev_queue, empty.prev) -
                                                  offsetof(struct prev_queue, empty));
}

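/*
 * Producer side: terminate the new skb's link, atomically swing the head to
 * it with release semantics, and then link the previous head to the new skb.
 * Safe to call from multiple producers concurrently.
 */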
static void __wg_prev_queue_enqueue(struct prev_queue *queue, struct sk_buff *skb)
{
        WRITE_ONCE(NEXT(skb), NULL);
        WRITE_ONCE(NEXT(xchg_release(&queue->head, skb)), skb);
}

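/*
 * Bounded enqueue: refuse the skb once MAX_QUEUED_PACKETS entries are in
 * flight, so producers cannot grow the queue without limit.
 */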
bool wg_prev_queue_enqueue(struct prev_queue *queue, struct sk_buff *skb)
{
        if (!atomic_add_unless(&queue->count, 1, MAX_QUEUED_PACKETS))
                return false;
        __wg_prev_queue_enqueue(queue, skb);
        return true;
}

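/*
 * Consumer side (single consumer only): skip over the stub if it sits at the
 * tail, return the current tail when a successor is already visible, and
 * otherwise re-insert the stub to detect whether a producer is mid-enqueue.
 * Returns NULL when the queue is empty or a concurrent enqueue has not yet
 * finished linking its skb.
 */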
struct sk_buff *wg_prev_queue_dequeue(struct prev_queue *queue)
{
        struct sk_buff *tail = queue->tail, *next = smp_load_acquire(&NEXT(tail));

        if (tail == STUB(queue)) {
                if (!next)
                        return NULL;
                queue->tail = next;
                tail = next;
                next = smp_load_acquire(&NEXT(next));
        }
        if (next) {
                queue->tail = next;
                atomic_dec(&queue->count);
                return tail;
        }
        if (tail != READ_ONCE(queue->head))
                return NULL;
        __wg_prev_queue_enqueue(queue, STUB(queue));
        next = smp_load_acquire(&NEXT(tail));
        if (next) {
                queue->tail = next;
                atomic_dec(&queue->count);
                return tail;
        }
        return NULL;
}

#undef NEXT
#undef STUB