author: Jason A. Donenfeld <Jason@zx2c4.com> 2023-07-03 03:27:04 +0200
committer: Greg Kroah-Hartman <gregkh@linuxfoundation.org> 2023-07-19 16:37:02 +0200
commit: 9a1317621fbf2347872bba368e47f78a670bc2f9
parent: b79c09c2bf2d7643902a6ef26152de602c5c5e4b
Commit Summary:
wireguard: queueing: use saner cpu selection wrapping
Diffstat:
4 files changed, 14 insertions, 15 deletions
diff --git a/drivers/net/wireguard/queueing.c b/drivers/net/wireguard/queueing.c
index 8084e7408c0a..26d235d15235 100644
--- a/drivers/net/wireguard/queueing.c
+++ b/drivers/net/wireguard/queueing.c
@@ -28,6 +28,7 @@ int wg_packet_queue_init(struct crypt_queue *queue, work_func_t function,
int ret;
memset(queue, 0, sizeof(*queue));
+ queue->last_cpu = -1;
ret = ptr_ring_init(&queue->ring, len, GFP_KERNEL);
if (ret)
return ret;
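
The -1 seed matters for the new helper below: cpumask_next(n, mask) returns the first set bit strictly after n, so a cursor of -1 makes the very first lookup behave like cpumask_first() and land on the first online CPU, whereas the old code started from the zero-initialised cursor and fixed it up in a loop. A minimal user-space sketch of that property; toy_cpumask_next() and toy_nr_cpu_ids are hypothetical stand-ins for the kernel's cpumask_next() and nr_cpu_ids, not code from this patch:

#include <stdio.h>

static const int toy_nr_cpu_ids = 8;

/* Bitmap stand-in for cpu_online_mask: bit n set => CPU n is online. */
static int toy_cpumask_next(int n, unsigned int online_mask)
{
	for (int cpu = n + 1; cpu < toy_nr_cpu_ids; cpu++)
		if (online_mask & (1u << cpu))
			return cpu;
	return toy_nr_cpu_ids;	/* "no further online CPU" */
}

int main(void)
{
	unsigned int online_mask = 0x16;	/* CPUs 1, 2 and 4 online */

	/* A cursor of -1 picks the first online CPU (1), not CPU 0. */
	printf("first pick: %d\n", toy_cpumask_next(-1, online_mask));
	return 0;
}
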
diff --git a/drivers/net/wireguard/queueing.h b/drivers/net/wireguard/queueing.h
index 125284b346a7..1ea4f874e367 100644
--- a/drivers/net/wireguard/queueing.h
+++ b/drivers/net/wireguard/queueing.h
@@ -117,20 +117,17 @@ static inline int wg_cpumask_choose_online(int *stored_cpu, unsigned int id)
return cpu;
}
-/* This function is racy, in the sense that next is unlocked, so it could return
- * the same CPU twice. A race-free version of this would be to instead store an
- * atomic sequence number, do an increment-and-return, and then iterate through
- * every possible CPU until we get to that index -- choose_cpu. However that's
- * a bit slower, and it doesn't seem like this potential race actually
- * introduces any performance loss, so we live with it.
+/* This function is racy, in the sense that it's called while last_cpu is
+ * unlocked, so it could return the same CPU twice. Adding locking or using
+ * atomic sequence numbers is slower though, and the consequences of racing are
+ * harmless, so live with it.
*/
-static inline int wg_cpumask_next_online(int *next)
+static inline int wg_cpumask_next_online(int *last_cpu)
{
- int cpu = *next;
-
- while (unlikely(!cpumask_test_cpu(cpu, cpu_online_mask)))
- cpu = cpumask_next(cpu, cpu_online_mask) % nr_cpumask_bits;
- *next = cpumask_next(cpu, cpu_online_mask) % nr_cpumask_bits;
+ int cpu = cpumask_next(*last_cpu, cpu_online_mask);
+ if (cpu >= nr_cpu_ids)
+ cpu = cpumask_first(cpu_online_mask);
+ *last_cpu = cpu;
return cpu;
}
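
The rewritten helper advances to the next online CPU after the stored cursor and, once cpumask_next() runs past nr_cpu_ids, wraps around with cpumask_first(), replacing the old modulo-by-nr_cpumask_bits arithmetic. Below is a self-contained user-space sketch of that round-robin wrap; toy_online_mask, toy_cpumask_next(), toy_cpumask_first() and TOY_NR_CPU_IDS are hypothetical stand-ins rather than kernel code, and the sketch ignores the benign race described in the comment above:

#include <stdio.h>

#define TOY_NR_CPU_IDS 8

static unsigned int toy_online_mask = 0x16;	/* CPUs 1, 2 and 4 online */

static int toy_cpumask_next(int n)
{
	for (int cpu = n + 1; cpu < TOY_NR_CPU_IDS; cpu++)
		if (toy_online_mask & (1u << cpu))
			return cpu;
	return TOY_NR_CPU_IDS;	/* past the last online CPU */
}

static int toy_cpumask_first(void)
{
	return toy_cpumask_next(-1);
}

/* Same shape as the new wg_cpumask_next_online(). */
static int toy_next_online(int *last_cpu)
{
	int cpu = toy_cpumask_next(*last_cpu);

	if (cpu >= TOY_NR_CPU_IDS)
		cpu = toy_cpumask_first();
	*last_cpu = cpu;
	return cpu;
}

int main(void)
{
	int last_cpu = -1;	/* as seeded in wg_packet_queue_init() */

	/* Round-robins 1, 2, 4, 1, 2, 4 over the toy online set. */
	for (int i = 0; i < 6; i++)
		printf("%d ", toy_next_online(&last_cpu));
	printf("\n");
	return 0;
}
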
@@ -159,7 +156,7 @@ static inline void wg_prev_queue_drop_peeked(struct prev_queue *queue)
static inline int wg_queue_enqueue_per_device_and_peer(
struct crypt_queue *device_queue, struct prev_queue *peer_queue,
- struct sk_buff *skb, struct workqueue_struct *wq, int *next_cpu)
+ struct sk_buff *skb, struct workqueue_struct *wq)
{
int cpu;
@@ -173,7 +170,7 @@ static inline int wg_queue_enqueue_per_device_and_peer(
/* Then we queue it up in the device queue, which consumes the
* packet as soon as it can.
*/
- cpu = wg_cpumask_next_online(next_cpu);
+ cpu = wg_cpumask_next_online(&device_queue->last_cpu);
if (unlikely(ptr_ring_produce_bh(&device_queue->ring, skb)))
return -EPIPE;
queue_work_on(cpu, wq, &per_cpu_ptr(device_queue->worker, cpu)->work);
diff --git a/drivers/net/wireguard/receive.c b/drivers/net/wireguard/receive.c
index 7135d51d2d87..0b3f0c843550 100644
--- a/drivers/net/wireguard/receive.c
+++ b/drivers/net/wireguard/receive.c
@@ -524,7 +524,7 @@ static void wg_packet_consume_data(struct wg_device *wg, struct sk_buff *skb)
goto err;
ret = wg_queue_enqueue_per_device_and_peer(&wg->decrypt_queue, &peer->rx_queue, skb,
- wg->packet_crypt_wq, &wg->decrypt_queue.last_cpu);
+ wg->packet_crypt_wq);
if (unlikely(ret == -EPIPE))
wg_queue_enqueue_per_peer_rx(skb, PACKET_STATE_DEAD);
if (likely(!ret || ret == -EPIPE)) {
diff --git a/drivers/net/wireguard/send.c b/drivers/net/wireguard/send.c
index 5368f7c35b4b..95c853b59e1d 100644
--- a/drivers/net/wireguard/send.c
+++ b/drivers/net/wireguard/send.c
@@ -318,7 +318,7 @@ static void wg_packet_create_data(struct wg_peer *peer, struct sk_buff *first)
goto err;
ret = wg_queue_enqueue_per_device_and_peer(&wg->encrypt_queue, &peer->tx_queue, first,
- wg->packet_crypt_wq, &wg->encrypt_queue.last_cpu);
+ wg->packet_crypt_wq);
if (unlikely(ret == -EPIPE))
wg_queue_enqueue_per_peer_tx(first, PACKET_STATE_DEAD);
err: