// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2015-2019 Jason A. Donenfeld <Jason@zx2c4.com>. All Rights Reserved.
 */

#include "ratelimiter.h"
#include <linux/siphash.h>
#include <linux/mm.h>
#include <linux/slab.h>
#include <net/ip.h>

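/* Simple token-bucket rate limiter, keyed by source IP and network
 * namespace. Lookups are lock-free under RCU; insertion and removal are
 * serialized by table_lock. WireGuard uses this to throttle handshake
 * processing when the device is under load.
 */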
static struct kmem_cache *entry_cache;
static hsiphash_key_t key;
static spinlock_t table_lock = __SPIN_LOCK_UNLOCKED("ratelimiter_table_lock");
static DEFINE_MUTEX(init_lock);
static u64 init_refcnt; /* Protected by init_lock, hence not atomic. */
static atomic_t total_entries = ATOMIC_INIT(0);
static unsigned int max_entries, table_size;
static void wg_ratelimiter_gc_entries(struct work_struct *);
static DECLARE_DEFERRABLE_WORK(gc_work, wg_ratelimiter_gc_entries);
static struct hlist_head *table_v4;
#if IS_ENABLED(CONFIG_IPV6)
static struct hlist_head *table_v6;
#endif

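/* One entry per (netns, source IP) pair. @ip holds the IPv4 address or the
 * top 64 bits of the IPv6 address; @tokens is a nanosecond budget refilled
 * by elapsed time; @net is kept as an opaque pointer, used in this file only
 * for comparison. Entries are freed via RCU after removal from the table.
 */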
struct ratelimiter_entry {
	u64 last_time_ns, tokens, ip;
	void *net;
	spinlock_t lock;
	struct hlist_node hash;
	struct rcu_head rcu;
};

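/* Bucket parameters: a sustained rate of PACKETS_PER_SECOND, with bursts of
 * up to PACKETS_BURSTABLE packets. Tokens are measured in nanoseconds, so a
 * packet costs PACKET_COST ns and the bucket holds at most TOKEN_MAX ns.
 */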
enum {
	PACKETS_PER_SECOND = 20,
	PACKETS_BURSTABLE = 5,
	PACKET_COST = NSEC_PER_SEC / PACKETS_PER_SECOND,
	TOKEN_MAX = PACKET_COST * PACKETS_BURSTABLE
};

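/* RCU callback: returns the entry to the slab cache and drops it from the
 * global count, once the grace period begun in entry_uninit() has elapsed.
 */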
static void entry_free(struct rcu_head *rcu)
{
	kmem_cache_free(entry_cache,
			container_of(rcu, struct ratelimiter_entry, rcu));
	atomic_dec(&total_entries);
}

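/* Unlinks an entry from its bucket and defers freeing it until concurrent
 * RCU readers are done. Callers must hold table_lock.
 */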
static void entry_uninit(struct ratelimiter_entry *entry)
{
	hlist_del_rcu(&entry->hash);
	call_rcu(&entry->rcu, entry_free);
}

/* Calling this function with a NULL work uninits all entries. */
static void wg_ratelimiter_gc_entries(struct work_struct *work)
{
	const u64 now = ktime_get_coarse_boottime_ns();
	struct ratelimiter_entry *entry;
	struct hlist_node *temp;
	unsigned int i;

	for (i = 0; i < table_size; ++i) {
		spin_lock(&table_lock);
		hlist_for_each_entry_safe(entry, temp, &table_v4[i], hash) {
			if (unlikely(!work) ||
			    now - entry->last_time_ns > NSEC_PER_SEC)
				entry_uninit(entry);
		}
#if IS_ENABLED(CONFIG_IPV6)
		hlist_for_each_entry_safe(entry, temp, &table_v6[i], hash) {
			if (unlikely(!work) ||
			    now - entry->last_time_ns > NSEC_PER_SEC)
				entry_uninit(entry);
		}
#endif
		spin_unlock(&table_lock);
		if (likely(work))
			cond_resched();
	}
	if (likely(work))
		queue_delayed_work(system_power_efficient_wq, &gc_work, HZ);
}

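/* Returns true if a packet from skb's source address (within net) is inside
 * its rate budget and may be processed, charging the bucket PACKET_COST on
 * success. Unknown protocols are refused outright; a previously unseen
 * source is admitted with one packet's worth of tokens already spent.
 */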
bool wg_ratelimiter_allow(struct sk_buff *skb, struct net *net)
{
	/* We only take the bottom half of the net pointer, so that we can hash
	 * 3 words in the end. This way, siphash's len param fits into the final
	 * u32, and we don't incur an extra round.
	 */
	const u32 net_word = (unsigned long)net;
	struct ratelimiter_entry *entry;
	struct hlist_head *bucket;
	u64 ip;

	if (skb->protocol == htons(ETH_P_IP)) {
		ip = (u64 __force)ip_hdr(skb)->saddr;
		bucket = &table_v4[hsiphash_2u32(net_word, ip, &key) &
				   (table_size - 1)];
	}
#if IS_ENABLED(CONFIG_IPV6)
	else if (skb->protocol == htons(ETH_P_IPV6)) {
		/* Only use 64 bits, so as to ratelimit the whole /64. */
		memcpy(&ip, &ipv6_hdr(skb)->saddr, sizeof(ip));
		bucket = &table_v6[hsiphash_3u32(net_word, ip >> 32, ip, &key) &
				   (table_size - 1)];
	}
#endif
	else
		return false;
	rcu_read_lock();
	hlist_for_each_entry_rcu(entry, bucket, hash) {
		if (entry->net == net && entry->ip == ip) {
			u64 now, tokens;
			bool ret;
			/* Quasi-inspired by nft_limit.c, but this is actually a
			 * slightly different algorithm. Namely, we incorporate
			 * the burst as part of the maximum tokens, rather than
			 * as part of the rate.
			 */
			spin_lock(&entry->lock);
			now = ktime_get_coarse_boottime_ns();
			tokens = min_t(u64, TOKEN_MAX,
				       entry->tokens + now -
					       entry->last_time_ns);
			entry->last_time_ns = now;
			ret = tokens >= PACKET_COST;
			entry->tokens = ret ? tokens - PACKET_COST : tokens;
			spin_unlock(&entry->lock);
			rcu_read_unlock();
			return ret;
		}
	}
	rcu_read_unlock();

	if (atomic_inc_return(&total_entries) > max_entries)
		goto err_oom;

	entry = kmem_cache_alloc(entry_cache, GFP_KERNEL);
	if (unlikely(!entry))
		goto err_oom;

	entry->net = net;
	entry->ip = ip;
	INIT_HLIST_NODE(&entry->hash);
	spin_lock_init(&entry->lock);
	entry->last_time_ns = ktime_get_coarse_boottime_ns();
	entry->tokens = TOKEN_MAX - PACKET_COST;
	spin_lock(&table_lock);
	hlist_add_head_rcu(&entry->hash, bucket);
	spin_unlock(&table_lock);
	return true;

err_oom:
	atomic_dec(&total_entries);
	return false;
}

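/* Reference-counted initialization: the first caller allocates the hash
 * tables, sizes them by available RAM, seeds the hash key, and kicks off
 * periodic garbage collection; later callers only bump the refcount.
 */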
int wg_ratelimiter_init(void)
{
	mutex_lock(&init_lock);
	if (++init_refcnt != 1)
		goto out;

	entry_cache = KMEM_CACHE(ratelimiter_entry, 0);
	if (!entry_cache)
		goto err;

	/* xt_hashlimit.c uses a slightly different algorithm for ratelimiting,
	 * but what it shares in common is that it uses a massive hashtable. So,
	 * we borrow their wisdom about good table sizes on different systems
	 * dependent on RAM. This calculation here comes from there.
	 */
	table_size = (totalram_pages() > (1U << 30) / PAGE_SIZE) ? 8192 :
		max_t(unsigned long, 16, roundup_pow_of_two(
			(totalram_pages() << PAGE_SHIFT) /
			(1U << 14) / sizeof(struct hlist_head)));
	max_entries = table_size * 8;

	table_v4 = kvcalloc(table_size, sizeof(*table_v4), GFP_KERNEL);
	if (unlikely(!table_v4))
		goto err_kmemcache;

#if IS_ENABLED(CONFIG_IPV6)
	table_v6 = kvcalloc(table_size, sizeof(*table_v6), GFP_KERNEL);
	if (unlikely(!table_v6)) {
		kvfree(table_v4);
		goto err_kmemcache;
	}
#endif

	queue_delayed_work(system_power_efficient_wq, &gc_work, HZ);
	get_random_bytes(&key, sizeof(key));
out:
	mutex_unlock(&init_lock);
	return 0;

err_kmemcache:
	kmem_cache_destroy(entry_cache);
err:
	--init_refcnt;
	mutex_unlock(&init_lock);
	return -ENOMEM;
}

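/* Drops one reference; the last caller stops garbage collection, flushes
 * all entries, waits for in-flight RCU callbacks, and frees the tables.
 */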
void wg_ratelimiter_uninit(void)
{
	mutex_lock(&init_lock);
	if (!init_refcnt || --init_refcnt)
		goto out;

	cancel_delayed_work_sync(&gc_work);
	wg_ratelimiter_gc_entries(NULL);
	rcu_barrier();
	kvfree(table_v4);
#if IS_ENABLED(CONFIG_IPV6)
	kvfree(table_v6);
#endif
	kmem_cache_destroy(entry_cache);
out:
	mutex_unlock(&init_lock);
}

#include "selftest/ratelimiter.c"