// SPDX-License-Identifier: GPL-2.0-or-later
/*
 *	ip6_flowlabel.c		IPv6 flowlabel manager.
 *
 *	Authors:	Alexey Kuznetsov, <kuznet@ms2.inr.ac.ru>
 */

#include <linux/capability.h>
#include <linux/errno.h>
#include <linux/types.h>
#include <linux/socket.h>
#include <linux/net.h>
#include <linux/netdevice.h>
#include <linux/in6.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <linux/slab.h>
#include <linux/export.h>
#include <linux/pid_namespace.h>
#include <linux/jump_label_ratelimit.h>

#include <net/net_namespace.h>
#include <net/sock.h>

#include <net/ipv6.h>
#include <net/rawv6.h>
#include <net/transp_v6.h>

#include <linux/uaccess.h>

#define FL_MIN_LINGER	6	/* Minimal linger. It is set to 6 seconds, as
				   specified in the old IPv6 RFC. Well, it was
				   a reasonable value.
				 */
#define FL_MAX_LINGER	150	/* Maximal linger timeout */

/* FL hash table */

#define FL_MAX_PER_SOCK	32
#define FL_MAX_SIZE	4096
#define FL_HASH_MASK	255
#define FL_HASH(l)	(ntohl(l)&FL_HASH_MASK)

static atomic_t fl_size = ATOMIC_INIT(0);
static struct ip6_flowlabel __rcu *fl_ht[FL_HASH_MASK+1];

static void ip6_fl_gc(struct timer_list *unused);
static DEFINE_TIMER(ip6_fl_gc_timer, ip6_fl_gc);

/* FL hash table lock: it protects only the GC and hash-table updates;
 * lookups run under RCU.
 */

static DEFINE_SPINLOCK(ip6_fl_lock);

/* Lock for the per-socket flowlabel lists (np->ipv6_fl_list) */

static DEFINE_SPINLOCK(ip6_sk_fl_lock);

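/*
 * Deferred static key: enabled while at least one flow label exists that is
 * shared exclusively (EXCL/PROCESS/USER) or carries options, so callers can
 * skip the per-socket label lookup in the common case.
 */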
DEFINE_STATIC_KEY_DEFERRED_FALSE(ipv6_flowlabel_exclusive, HZ);
EXPORT_SYMBOL(ipv6_flowlabel_exclusive);

#define for_each_fl_rcu(hash, fl)				\
	for (fl = rcu_dereference_bh(fl_ht[(hash)]);		\
	     fl != NULL;					\
	     fl = rcu_dereference_bh(fl->next))
#define for_each_fl_continue_rcu(fl)				\
	for (fl = rcu_dereference_bh(fl->next);			\
	     fl != NULL;					\
	     fl = rcu_dereference_bh(fl->next))

#define for_each_sk_fl_rcu(np, sfl)				\
	for (sfl = rcu_dereference_bh(np->ipv6_fl_list);	\
	     sfl != NULL;					\
	     sfl = rcu_dereference_bh(sfl->next))

static inline struct ip6_flowlabel *__fl_lookup(struct net *net, __be32 label)
{
	struct ip6_flowlabel *fl;

	for_each_fl_rcu(FL_HASH(label), fl) {
		if (fl->label == label && net_eq(fl->fl_net, net))
			return fl;
	}
	return NULL;
}

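/*
 * Like __fl_lookup(), but takes a reference on the entry.  Returns NULL if
 * the label is unknown or the entry is already being torn down (its users
 * refcount has dropped to zero).
 */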
static struct ip6_flowlabel *fl_lookup(struct net *net, __be32 label)
{
	struct ip6_flowlabel *fl;

	rcu_read_lock_bh();
	fl = __fl_lookup(net, label);
	if (fl && !atomic_inc_not_zero(&fl->users))
		fl = NULL;
	rcu_read_unlock_bh();
	return fl;
}

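/*
 * A label is "shared exclusive" when its sharing mode restricts who may
 * attach to it: exclusive, per-process or per-user.  IPV6_FL_S_ANY labels
 * are not counted.
 */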
static bool fl_shared_exclusive(struct ip6_flowlabel *fl)
{
	return fl->share == IPV6_FL_S_EXCL ||
	       fl->share == IPV6_FL_S_PROCESS ||
	       fl->share == IPV6_FL_S_USER;
}

static void fl_free_rcu(struct rcu_head *head)
{
	struct ip6_flowlabel *fl = container_of(head, struct ip6_flowlabel, rcu);

	if (fl->share == IPV6_FL_S_PROCESS)
		put_pid(fl->owner.pid);
	kfree(fl->opt);
	kfree(fl);
}

static void fl_free(struct ip6_flowlabel *fl)
{
	if (!fl)
		return;

	if (fl_shared_exclusive(fl) || fl->opt)
		static_branch_slow_dec_deferred(&ipv6_flowlabel_exclusive);

	call_rcu(&fl->rcu, fl_free_rcu);
}

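/*
 * Drop one reference.  When the last user goes away, push the expiry time
 * out by the linger interval and make sure the GC timer fires early enough
 * to reap the entry.
 */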
static void fl_release(struct ip6_flowlabel *fl)
{
	spin_lock_bh(&ip6_fl_lock);

	fl->lastuse = jiffies;
	if (atomic_dec_and_test(&fl->users)) {
		unsigned long ttd = fl->lastuse + fl->linger;

		if (time_after(ttd, fl->expires))
			fl->expires = ttd;
		ttd = fl->expires;
		if (fl->opt && fl->share == IPV6_FL_S_EXCL) {
			struct ipv6_txoptions *opt = fl->opt;

			fl->opt = NULL;
			kfree(opt);
		}
		if (!timer_pending(&ip6_fl_gc_timer) ||
		    time_after(ip6_fl_gc_timer.expires, ttd))
			mod_timer(&ip6_fl_gc_timer, ttd);
	}
	spin_unlock_bh(&ip6_fl_lock);
}

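/*
 * Timer-driven garbage collector: walk every hash bucket, unlink and free
 * unreferenced labels whose expiry time has passed, then re-arm the timer
 * for the earliest expiry still pending.
 */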
static void ip6_fl_gc(struct timer_list *unused)
{
	int i;
	unsigned long now = jiffies;
	unsigned long sched = 0;

	spin_lock(&ip6_fl_lock);

	for (i = 0; i <= FL_HASH_MASK; i++) {
		struct ip6_flowlabel *fl;
		struct ip6_flowlabel __rcu **flp;

		flp = &fl_ht[i];
		while ((fl = rcu_dereference_protected(*flp,
						       lockdep_is_held(&ip6_fl_lock))) != NULL) {
			if (atomic_read(&fl->users) == 0) {
				unsigned long ttd = fl->lastuse + fl->linger;

				if (time_after(ttd, fl->expires))
					fl->expires = ttd;
				ttd = fl->expires;
				if (time_after_eq(now, ttd)) {
					*flp = fl->next;
					fl_free(fl);
					atomic_dec(&fl_size);
					continue;
				}
				if (!sched || time_before(ttd, sched))
					sched = ttd;
			}
			flp = &fl->next;
		}
	}
	if (!sched && atomic_read(&fl_size))
		sched = now + FL_MAX_LINGER;
	if (sched) {
		mod_timer(&ip6_fl_gc_timer, sched);
	}
	spin_unlock(&ip6_fl_lock);
}

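/*
 * Netns teardown: drop every unreferenced label that belongs to the dying
 * namespace (entries still in use are skipped).
 */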
static void __net_exit ip6_fl_purge(struct net *net)
{
	int i;

	spin_lock_bh(&ip6_fl_lock);
	for (i = 0; i <= FL_HASH_MASK; i++) {
		struct ip6_flowlabel *fl;
		struct ip6_flowlabel __rcu **flp;

		flp = &fl_ht[i];
		while ((fl = rcu_dereference_protected(*flp,
						       lockdep_is_held(&ip6_fl_lock))) != NULL) {
			if (net_eq(fl->fl_net, net) &&
			    atomic_read(&fl->users) == 0) {
				*flp = fl->next;
				fl_free(fl);
				atomic_dec(&fl_size);
				continue;
			}
			flp = &fl->next;
		}
	}
	spin_unlock_bh(&ip6_fl_lock);
}

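/*
 * Insert a freshly created label into the global hash table.  With label 0
 * a free random label is picked.  If the requested label already exists
 * (it may have been created while the caller ran unlocked), a reference to
 * the existing entry is returned instead and the caller must recheck it;
 * NULL is returned on success.
 */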
static struct ip6_flowlabel *fl_intern(struct net *net,
				       struct ip6_flowlabel *fl, __be32 label)
{
	struct ip6_flowlabel *lfl;

	fl->label = label & IPV6_FLOWLABEL_MASK;

	spin_lock_bh(&ip6_fl_lock);
	if (label == 0) {
		for (;;) {
			fl->label = htonl(prandom_u32())&IPV6_FLOWLABEL_MASK;
			if (fl->label) {
				lfl = __fl_lookup(net, fl->label);
				if (!lfl)
					break;
			}
		}
	} else {
		/*
		 * We dropped the ip6_fl_lock, so this entry could have
		 * reappeared in the meantime and we need to recheck for it.
		 *
		 * OTOH there is no need to search the active socket first,
		 * as is done in ipv6_flowlabel_opt - the sock is locked, so
		 * a new entry with the same label can only appear on
		 * another sock.
		 */
		lfl = __fl_lookup(net, fl->label);
		if (lfl) {
			atomic_inc(&lfl->users);
			spin_unlock_bh(&ip6_fl_lock);
			return lfl;
		}
	}

	fl->lastuse = jiffies;
	fl->next = fl_ht[FL_HASH(fl->label)];
	rcu_assign_pointer(fl_ht[FL_HASH(fl->label)], fl);
	atomic_inc(&fl_size);
	spin_unlock_bh(&ip6_fl_lock);
	return NULL;
}

/* Socket flowlabel lists */

struct ip6_flowlabel *__fl6_sock_lookup(struct sock *sk, __be32 label)
{
	struct ipv6_fl_socklist *sfl;
	struct ipv6_pinfo *np = inet6_sk(sk);

	label &= IPV6_FLOWLABEL_MASK;

	rcu_read_lock_bh();
	for_each_sk_fl_rcu(np, sfl) {
		struct ip6_flowlabel *fl = sfl->fl;

		if (fl->label == label && atomic_inc_not_zero(&fl->users)) {
			fl->lastuse = jiffies;
			rcu_read_unlock_bh();
			return fl;
		}
	}
	rcu_read_unlock_bh();
	return NULL;
}
EXPORT_SYMBOL_GPL(__fl6_sock_lookup);

void fl6_free_socklist(struct sock *sk)
{
	struct ipv6_pinfo *np = inet6_sk(sk);
	struct ipv6_fl_socklist *sfl;

	if (!rcu_access_pointer(np->ipv6_fl_list))
		return;

	spin_lock_bh(&ip6_sk_fl_lock);
	while ((sfl = rcu_dereference_protected(np->ipv6_fl_list,
						lockdep_is_held(&ip6_sk_fl_lock))) != NULL) {
		np->ipv6_fl_list = sfl->next;
		spin_unlock_bh(&ip6_sk_fl_lock);

		fl_release(sfl->fl);
		kfree_rcu(sfl, rcu);

		spin_lock_bh(&ip6_sk_fl_lock);
	}
	spin_unlock_bh(&ip6_sk_fl_lock);
}

/* Service routines */

/*
 * This is the only difficult place: a flow label requires that the headers
 * up to and including the routing header be identical, yet the user may
 * still supply per-packet options that follow the rthdr.
 */

struct ipv6_txoptions *fl6_merge_options(struct ipv6_txoptions *opt_space,
					 struct ip6_flowlabel *fl,
					 struct ipv6_txoptions *fopt)
{
	struct ipv6_txoptions *fl_opt = fl->opt;

	if (!fopt || fopt->opt_flen == 0)
		return fl_opt;

	if (fl_opt) {
		opt_space->hopopt = fl_opt->hopopt;
		opt_space->dst0opt = fl_opt->dst0opt;
		opt_space->srcrt = fl_opt->srcrt;
		opt_space->opt_nflen = fl_opt->opt_nflen;
	} else {
		if (fopt->opt_nflen == 0)
			return fopt;
		opt_space->hopopt = NULL;
		opt_space->dst0opt = NULL;
		opt_space->srcrt = NULL;
		opt_space->opt_nflen = 0;
	}
	opt_space->dst1opt = fopt->dst1opt;
	opt_space->opt_flen = fopt->opt_flen;
	opt_space->tot_len = fopt->tot_len;
	return opt_space;
}
EXPORT_SYMBOL_GPL(fl6_merge_options);

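/*
 * Convert a linger/expiry value given in seconds to jiffies, clamping it to
 * at least FL_MIN_LINGER.  Values above FL_MAX_LINGER are allowed only with
 * CAP_NET_ADMIN; otherwise 0 is returned so the caller fails with -EPERM.
 */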
static unsigned long check_linger(unsigned long ttl)
{
	if (ttl < FL_MIN_LINGER)
		return FL_MIN_LINGER*HZ;
	if (ttl > FL_MAX_LINGER && !capable(CAP_NET_ADMIN))
		return 0;
	return ttl*HZ;
}

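/*
 * Extend the lifetime of a label.  Both the linger interval and the
 * absolute expiry time can only grow here, never shrink.
 */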
static int fl6_renew(struct ip6_flowlabel *fl, unsigned long linger, unsigned long expires)
{
	linger = check_linger(linger);
	if (!linger)
		return -EPERM;
	expires = check_linger(expires);
	if (!expires)
		return -EPERM;

	spin_lock_bh(&ip6_fl_lock);
	fl->lastuse = jiffies;
	if (time_before(fl->linger, linger))
		fl->linger = linger;
	if (time_before(expires, fl->linger))
		expires = fl->linger;
	if (time_before(fl->expires, fl->lastuse + expires))
		fl->expires = fl->lastuse + expires;
	spin_unlock_bh(&ip6_fl_lock);

	return 0;
}

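/*
 * Build a new label from a struct in6_flowlabel_req plus any ancillary data
 * appended to it in optval.  Only non-fragmentable options (hop-by-hop,
 * destination options before the rthdr, routing header) may be attached to
 * a label; fragmentable options are rejected with -EINVAL.
 */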
static struct ip6_flowlabel *
fl_create(struct net *net, struct sock *sk, struct in6_flowlabel_req *freq,
	  sockptr_t optval, int optlen, int *err_p)
{
	struct ip6_flowlabel *fl = NULL;
	int olen;
	int addr_type;
	int err;

	olen = optlen - CMSG_ALIGN(sizeof(*freq));
	err = -EINVAL;
	if (olen > 64 * 1024)
		goto done;

	err = -ENOMEM;
	fl = kzalloc(sizeof(*fl), GFP_KERNEL);
	if (!fl)
		goto done;

	if (olen > 0) {
		struct msghdr msg;
		struct flowi6 flowi6;
		struct ipcm6_cookie ipc6;

		err = -ENOMEM;
		fl->opt = kmalloc(sizeof(*fl->opt) + olen, GFP_KERNEL);
		if (!fl->opt)
			goto done;

		memset(fl->opt, 0, sizeof(*fl->opt));
		fl->opt->tot_len = sizeof(*fl->opt) + olen;
		err = -EFAULT;
		if (copy_from_sockptr_offset(fl->opt + 1, optval,
					     CMSG_ALIGN(sizeof(*freq)), olen))
			goto done;

		msg.msg_controllen = olen;
		msg.msg_control = (void *)(fl->opt+1);
		memset(&flowi6, 0, sizeof(flowi6));

		ipc6.opt = fl->opt;
		err = ip6_datagram_send_ctl(net, sk, &msg, &flowi6, &ipc6);
		if (err)
			goto done;
		err = -EINVAL;
		if (fl->opt->opt_flen)
			goto done;
		if (fl->opt->opt_nflen == 0) {
			kfree(fl->opt);
			fl->opt = NULL;
		}
	}

	fl->fl_net = net;
	fl->expires = jiffies;
	err = fl6_renew(fl, freq->flr_linger, freq->flr_expires);
	if (err)
		goto done;
	fl->share = freq->flr_share;
	addr_type = ipv6_addr_type(&freq->flr_dst);
	if ((addr_type & IPV6_ADDR_MAPPED) ||
	    addr_type == IPV6_ADDR_ANY) {
		err = -EINVAL;
		goto done;
	}
	fl->dst = freq->flr_dst;
	atomic_set(&fl->users, 1);
	switch (fl->share) {
	case IPV6_FL_S_EXCL:
	case IPV6_FL_S_ANY:
		break;
	case IPV6_FL_S_PROCESS:
		fl->owner.pid = get_task_pid(current, PIDTYPE_PID);
		break;
	case IPV6_FL_S_USER:
		fl->owner.uid = current_euid();
		break;
	default:
		err = -EINVAL;
		goto done;
	}
	if (fl_shared_exclusive(fl) || fl->opt)
		static_branch_deferred_inc(&ipv6_flowlabel_exclusive);
	return fl;

done:
	if (fl) {
		kfree(fl->opt);
		kfree(fl);
	}
	*err_p = err;
	return NULL;
}

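/*
 * Crude admission control: refuse new labels once the global table or this
 * socket's list grows too large, unless the caller has CAP_NET_ADMIN.
 */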
static int mem_check(struct sock *sk)
{
	struct ipv6_pinfo *np = inet6_sk(sk);
	struct ipv6_fl_socklist *sfl;
	int room = FL_MAX_SIZE - atomic_read(&fl_size);
	int count = 0;

	if (room > FL_MAX_SIZE - FL_MAX_PER_SOCK)
		return 0;

	rcu_read_lock_bh();
	for_each_sk_fl_rcu(np, sfl)
		count++;
	rcu_read_unlock_bh();

	if (room <= 0 ||
	    ((count >= FL_MAX_PER_SOCK ||
	      (count > 0 && room < FL_MAX_SIZE/2) || room < FL_MAX_SIZE/4) &&
	     !capable(CAP_NET_ADMIN)))
		return -ENOBUFS;

	return 0;
}

static inline void fl_link(struct ipv6_pinfo *np, struct ipv6_fl_socklist *sfl,
			   struct ip6_flowlabel *fl)
{
	spin_lock_bh(&ip6_sk_fl_lock);
	sfl->fl = fl;
	sfl->next = np->ipv6_fl_list;
	rcu_assign_pointer(np->ipv6_fl_list, sfl);
	spin_unlock_bh(&ip6_sk_fl_lock);
}

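/*
 * getsockopt(IPV6_FLOWLABEL_MGR): report either the peer's flow label
 * (IPV6_FL_F_REMOTE), the reflected label, or the attributes of the label
 * currently attached to this socket.
 */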
int ipv6_flowlabel_opt_get(struct sock *sk, struct in6_flowlabel_req *freq,
			   int flags)
{
	struct ipv6_pinfo *np = inet6_sk(sk);
	struct ipv6_fl_socklist *sfl;

	if (flags & IPV6_FL_F_REMOTE) {
		freq->flr_label = np->rcv_flowinfo & IPV6_FLOWLABEL_MASK;
		return 0;
	}

	if (np->repflow) {
		freq->flr_label = np->flow_label;
		return 0;
	}

	rcu_read_lock_bh();

	for_each_sk_fl_rcu(np, sfl) {
		if (sfl->fl->label == (np->flow_label & IPV6_FLOWLABEL_MASK)) {
			spin_lock_bh(&ip6_fl_lock);
			freq->flr_label = sfl->fl->label;
			freq->flr_dst = sfl->fl->dst;
			freq->flr_share = sfl->fl->share;
			freq->flr_expires = (sfl->fl->expires - jiffies) / HZ;
			freq->flr_linger = sfl->fl->linger / HZ;

			spin_unlock_bh(&ip6_fl_lock);
			rcu_read_unlock_bh();
			return 0;
		}
	}
	rcu_read_unlock_bh();

	return -ENOENT;
}

#define socklist_dereference(__sflp) \
	rcu_dereference_protected(__sflp, lockdep_is_held(&ip6_sk_fl_lock))

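/*
 * IPV6_FL_A_PUT: detach a label from the socket (or stop reflecting flow
 * labels when IPV6_FL_F_REFLECT is set) and drop its reference.
 */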
static int ipv6_flowlabel_put(struct sock *sk, struct in6_flowlabel_req *freq)
{
	struct ipv6_pinfo *np = inet6_sk(sk);
	struct ipv6_fl_socklist __rcu **sflp;
	struct ipv6_fl_socklist *sfl;

	if (freq->flr_flags & IPV6_FL_F_REFLECT) {
		if (sk->sk_protocol != IPPROTO_TCP)
			return -ENOPROTOOPT;
		if (!np->repflow)
			return -ESRCH;
		np->flow_label = 0;
		np->repflow = 0;
		return 0;
	}

	spin_lock_bh(&ip6_sk_fl_lock);
	for (sflp = &np->ipv6_fl_list;
	     (sfl = socklist_dereference(*sflp)) != NULL;
	     sflp = &sfl->next) {
		if (sfl->fl->label == freq->flr_label)
			goto found;
	}
	spin_unlock_bh(&ip6_sk_fl_lock);
	return -ESRCH;
found:
	if (freq->flr_label == (np->flow_label & IPV6_FLOWLABEL_MASK))
		np->flow_label &= ~IPV6_FLOWLABEL_MASK;
	*sflp = sfl->next;
	spin_unlock_bh(&ip6_sk_fl_lock);
	fl_release(sfl->fl);
	kfree_rcu(sfl, rcu);
	return 0;
}

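/*
 * IPV6_FL_A_RENEW: extend the lifetime of a label attached to this socket.
 * A privileged caller (CAP_NET_ADMIN) that passes flr_share ==
 * IPV6_FL_S_NONE may renew any label in the global table.
 */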
static int ipv6_flowlabel_renew(struct sock *sk, struct in6_flowlabel_req *freq)
{
	struct ipv6_pinfo *np = inet6_sk(sk);
	struct net *net = sock_net(sk);
	struct ipv6_fl_socklist *sfl;
	int err;

	rcu_read_lock_bh();
	for_each_sk_fl_rcu(np, sfl) {
		if (sfl->fl->label == freq->flr_label) {
			err = fl6_renew(sfl->fl, freq->flr_linger,
					freq->flr_expires);
			rcu_read_unlock_bh();
			return err;
		}
	}
	rcu_read_unlock_bh();

	if (freq->flr_share == IPV6_FL_S_NONE &&
	    ns_capable(net->user_ns, CAP_NET_ADMIN)) {
		struct ip6_flowlabel *fl = fl_lookup(net, freq->flr_label);

		if (fl) {
			err = fl6_renew(fl, freq->flr_linger,
					freq->flr_expires);
			fl_release(fl);
			return err;
		}
	}
	return -ESRCH;
}

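/*
 * IPV6_FL_A_GET: attach a label to the socket.  Either join an existing
 * label (subject to its sharing policy) or, with IPV6_FL_F_CREATE, create a
 * new one; label 0 asks the kernel to pick a random label, which is then
 * copied back to userspace through optval.
 */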
static int ipv6_flowlabel_get(struct sock *sk, struct in6_flowlabel_req *freq,
			      sockptr_t optval, int optlen)
{
	struct ipv6_fl_socklist *sfl, *sfl1 = NULL;
	struct ip6_flowlabel *fl, *fl1 = NULL;
	struct ipv6_pinfo *np = inet6_sk(sk);
	struct net *net = sock_net(sk);
	int err;

	if (freq->flr_flags & IPV6_FL_F_REFLECT) {
		if (net->ipv6.sysctl.flowlabel_consistency) {
			net_info_ratelimited("Cannot set IPV6_FL_F_REFLECT if the flowlabel_consistency sysctl is enabled\n");
			return -EPERM;
		}

		if (sk->sk_protocol != IPPROTO_TCP)
			return -ENOPROTOOPT;
		np->repflow = 1;
		return 0;
	}

	if (freq->flr_label & ~IPV6_FLOWLABEL_MASK)
		return -EINVAL;
	if (net->ipv6.sysctl.flowlabel_state_ranges &&
	    (freq->flr_label & IPV6_FLOWLABEL_STATELESS_FLAG))
		return -ERANGE;

	fl = fl_create(net, sk, freq, optval, optlen, &err);
	if (!fl)
		return err;

	sfl1 = kmalloc(sizeof(*sfl1), GFP_KERNEL);

	if (freq->flr_label) {
		err = -EEXIST;
		rcu_read_lock_bh();
		for_each_sk_fl_rcu(np, sfl) {
			if (sfl->fl->label == freq->flr_label) {
				if (freq->flr_flags & IPV6_FL_F_EXCL) {
					rcu_read_unlock_bh();
					goto done;
				}
				fl1 = sfl->fl;
				if (!atomic_inc_not_zero(&fl1->users))
					fl1 = NULL;
				break;
			}
		}
		rcu_read_unlock_bh();

		if (!fl1)
			fl1 = fl_lookup(net, freq->flr_label);
		if (fl1) {
recheck:
			err = -EEXIST;
			if (freq->flr_flags&IPV6_FL_F_EXCL)
				goto release;
			err = -EPERM;
			if (fl1->share == IPV6_FL_S_EXCL ||
			    fl1->share != fl->share ||
			    ((fl1->share == IPV6_FL_S_PROCESS) &&
			     (fl1->owner.pid != fl->owner.pid)) ||
			    ((fl1->share == IPV6_FL_S_USER) &&
			     !uid_eq(fl1->owner.uid, fl->owner.uid)))
				goto release;

			err = -ENOMEM;
			if (!sfl1)
				goto release;
			if (fl->linger > fl1->linger)
				fl1->linger = fl->linger;
			if ((long)(fl->expires - fl1->expires) > 0)
				fl1->expires = fl->expires;
			fl_link(np, sfl1, fl1);
			fl_free(fl);
			return 0;

release:
			fl_release(fl1);
			goto done;
		}
	}
	err = -ENOENT;
	if (!(freq->flr_flags & IPV6_FL_F_CREATE))
		goto done;

	err = -ENOMEM;
	if (!sfl1)
		goto done;

	err = mem_check(sk);
	if (err != 0)
		goto done;

	fl1 = fl_intern(net, fl, freq->flr_label);
	if (fl1)
		goto recheck;

	if (!freq->flr_label) {
		size_t offset = offsetof(struct in6_flowlabel_req, flr_label);

		if (copy_to_sockptr_offset(optval, offset, &fl->label,
					   sizeof(fl->label))) {
			/* Intentionally ignore fault. */
		}
	}

	fl_link(np, sfl1, fl);
	return 0;
done:
	fl_free(fl);
	kfree(sfl1);
	return err;
}

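/*
 * Illustrative userspace sketch (not part of the kernel sources): how a
 * process would typically drive this setsockopt() interface.  Error
 * handling is left out; the names below come from <linux/in6.h>.
 *
 *	struct in6_flowlabel_req freq = {
 *		.flr_action  = IPV6_FL_A_GET,
 *		.flr_flags   = IPV6_FL_F_CREATE,
 *		.flr_share   = IPV6_FL_S_EXCL,
 *		.flr_label   = 0,	// let the kernel pick a random label
 *		.flr_linger  = 10,	// seconds
 *		.flr_expires = 30,	// seconds
 *	};
 *	inet_pton(AF_INET6, "2001:db8::1", &freq.flr_dst);
 *	setsockopt(fd, IPPROTO_IPV6, IPV6_FLOWLABEL_MGR, &freq, sizeof(freq));
 *	// freq.flr_label now holds the assigned label.  To use it, enable
 *	// IPV6_FLOWINFO_SEND on the socket and pass the label in
 *	// sin6_flowinfo when sending.
 */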
int ipv6_flowlabel_opt(struct sock *sk, sockptr_t optval, int optlen)
{
	struct in6_flowlabel_req freq;

	if (optlen < sizeof(freq))
		return -EINVAL;
	if (copy_from_sockptr(&freq, optval, sizeof(freq)))
		return -EFAULT;

	switch (freq.flr_action) {
	case IPV6_FL_A_PUT:
		return ipv6_flowlabel_put(sk, &freq);
	case IPV6_FL_A_RENEW:
		return ipv6_flowlabel_renew(sk, &freq);
	case IPV6_FL_A_GET:
		return ipv6_flowlabel_get(sk, &freq, optval, optlen);
	default:
		return -EINVAL;
	}
}

#ifdef CONFIG_PROC_FS

struct ip6fl_iter_state {
	struct seq_net_private p;
	struct pid_namespace *pid_ns;
	int bucket;
};

#define ip6fl_seq_private(seq)	((struct ip6fl_iter_state *)(seq)->private)

static struct ip6_flowlabel *ip6fl_get_first(struct seq_file *seq)
{
	struct ip6_flowlabel *fl = NULL;
	struct ip6fl_iter_state *state = ip6fl_seq_private(seq);
	struct net *net = seq_file_net(seq);

	for (state->bucket = 0; state->bucket <= FL_HASH_MASK; ++state->bucket) {
		for_each_fl_rcu(state->bucket, fl) {
			if (net_eq(fl->fl_net, net))
				goto out;
		}
	}
	fl = NULL;
out:
	return fl;
}

static struct ip6_flowlabel *ip6fl_get_next(struct seq_file *seq, struct ip6_flowlabel *fl)
{
	struct ip6fl_iter_state *state = ip6fl_seq_private(seq);
	struct net *net = seq_file_net(seq);

	for_each_fl_continue_rcu(fl) {
		if (net_eq(fl->fl_net, net))
			goto out;
	}

try_again:
	if (++state->bucket <= FL_HASH_MASK) {
		for_each_fl_rcu(state->bucket, fl) {
			if (net_eq(fl->fl_net, net))
				goto out;
		}
		goto try_again;
	}
	fl = NULL;

out:
	return fl;
}

static struct ip6_flowlabel *ip6fl_get_idx(struct seq_file *seq, loff_t pos)
{
	struct ip6_flowlabel *fl = ip6fl_get_first(seq);

	if (fl)
		while (pos && (fl = ip6fl_get_next(seq, fl)) != NULL)
			--pos;
	return pos ? NULL : fl;
}

static void *ip6fl_seq_start(struct seq_file *seq, loff_t *pos)
	__acquires(RCU)
{
	struct ip6fl_iter_state *state = ip6fl_seq_private(seq);

	state->pid_ns = proc_pid_ns(file_inode(seq->file)->i_sb);

	rcu_read_lock_bh();
	return *pos ? ip6fl_get_idx(seq, *pos - 1) : SEQ_START_TOKEN;
}

static void *ip6fl_seq_next(struct seq_file *seq, void *v, loff_t *pos)
{
	struct ip6_flowlabel *fl;

	if (v == SEQ_START_TOKEN)
		fl = ip6fl_get_first(seq);
	else
		fl = ip6fl_get_next(seq, v);
	++*pos;
	return fl;
}

static void ip6fl_seq_stop(struct seq_file *seq, void *v)
	__releases(RCU)
{
	rcu_read_unlock_bh();
}

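/*
 * One line of /proc/net/ip6_flowlabel per label: the label value, sharing
 * mode, owner (pid or uid, 0 otherwise), reference count, remaining linger
 * and expiry times in seconds, destination address and option length.
 */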
static int ip6fl_seq_show(struct seq_file *seq, void *v)
{
	struct ip6fl_iter_state *state = ip6fl_seq_private(seq);

	if (v == SEQ_START_TOKEN) {
		seq_puts(seq, "Label S Owner Users Linger Expires Dst Opt\n");
	} else {
		struct ip6_flowlabel *fl = v;

		seq_printf(seq,
			   "%05X %-1d %-6d %-6d %-6ld %-8ld %pi6 %-4d\n",
			   (unsigned int)ntohl(fl->label),
			   fl->share,
			   ((fl->share == IPV6_FL_S_PROCESS) ?
			    pid_nr_ns(fl->owner.pid, state->pid_ns) :
			    ((fl->share == IPV6_FL_S_USER) ?
			     from_kuid_munged(seq_user_ns(seq), fl->owner.uid) :
			     0)),
			   atomic_read(&fl->users),
			   fl->linger/HZ,
			   (long)(fl->expires - jiffies)/HZ,
			   &fl->dst,
			   fl->opt ? fl->opt->opt_nflen : 0);
	}
	return 0;
}

static const struct seq_operations ip6fl_seq_ops = {
	.start	= ip6fl_seq_start,
	.next	= ip6fl_seq_next,
	.stop	= ip6fl_seq_stop,
	.show	= ip6fl_seq_show,
};

static int __net_init ip6_flowlabel_proc_init(struct net *net)
{
	if (!proc_create_net("ip6_flowlabel", 0444, net->proc_net,
			     &ip6fl_seq_ops, sizeof(struct ip6fl_iter_state)))
		return -ENOMEM;
	return 0;
}

static void __net_exit ip6_flowlabel_proc_fini(struct net *net)
{
	remove_proc_entry("ip6_flowlabel", net->proc_net);
}
#else
static inline int ip6_flowlabel_proc_init(struct net *net)
{
	return 0;
}
static inline void ip6_flowlabel_proc_fini(struct net *net)
{
}
#endif

static void __net_exit ip6_flowlabel_net_exit(struct net *net)
{
	ip6_fl_purge(net);
	ip6_flowlabel_proc_fini(net);
}

static struct pernet_operations ip6_flowlabel_net_ops = {
	.init = ip6_flowlabel_proc_init,
	.exit = ip6_flowlabel_net_exit,
};

int ip6_flowlabel_init(void)
{
	return register_pernet_subsys(&ip6_flowlabel_net_ops);
}

void ip6_flowlabel_cleanup(void)
{
	static_key_deferred_flush(&ipv6_flowlabel_exclusive);
	del_timer(&ip6_fl_gc_timer);
	unregister_pernet_subsys(&ip6_flowlabel_net_ops);
}