// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Linux IPv6 multicast routing support for BSD pim6sd
 * Based on net/ipv4/ipmr.c.
 *
 * (c) 2004 Mickael Hoerdt, <hoerdt@clarinet.u-strasbg.fr>
 *	LSIIT Laboratory, Strasbourg, France
 * (c) 2004 Jean-Philippe Andriot, <jean-philippe.andriot@6WIND.com>
 *	6WIND, Paris, France
 * Copyright (C)2007,2008 USAGI/WIDE Project
 *	YOSHIFUJI Hideaki <yoshfuji@linux-ipv6.org>
 */

#include <linux/uaccess.h>
#include <linux/types.h>
#include <linux/sched.h>
#include <linux/errno.h>
#include <linux/mm.h>
#include <linux/kernel.h>
#include <linux/fcntl.h>
#include <linux/stat.h>
#include <linux/socket.h>
#include <linux/inet.h>
#include <linux/netdevice.h>
#include <linux/inetdevice.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <linux/init.h>
#include <linux/compat.h>
#include <linux/rhashtable.h>
#include <net/protocol.h>
#include <linux/skbuff.h>
#include <net/raw.h>
#include <linux/notifier.h>
#include <linux/if_arp.h>
#include <net/checksum.h>
#include <net/netlink.h>
#include <net/fib_rules.h>

#include <net/ipv6.h>
#include <net/ip6_route.h>
#include <linux/mroute6.h>
#include <linux/pim.h>
#include <net/addrconf.h>
#include <linux/netfilter_ipv6.h>
#include <linux/export.h>
#include <net/ip6_checksum.h>
#include <linux/netconf.h>
#include <net/ip_tunnels.h>

#include <linux/nospec.h>

struct ip6mr_rule {
	struct fib_rule common;
};

struct ip6mr_result {
	struct mr_table	*mrt;
};

/* Big lock, protecting vif table, mrt cache and mroute socket state.
 * Note that changes are serialized via rtnl_lock.
 */

static DEFINE_RWLOCK(mrt_lock);

/* Multicast router control variables */

/* Special spinlock for queue of unresolved entries */
static DEFINE_SPINLOCK(mfc_unres_lock);

/* We return to Alan's original scheme. The hash table of resolved
 * entries is changed only in process context and protected by the
 * weak lock mrt_lock. The queue of unresolved entries is protected
 * by the strong spinlock mfc_unres_lock.
 *
 * This keeps the data path entirely free of exclusive locks.
 */

static struct kmem_cache *mrt_cachep __read_mostly;

static struct mr_table *ip6mr_new_table(struct net *net, u32 id);
static void ip6mr_free_table(struct mr_table *mrt);

static void ip6_mr_forward(struct net *net, struct mr_table *mrt,
			   struct net_device *dev, struct sk_buff *skb,
			   struct mfc6_cache *cache);
static int ip6mr_cache_report(struct mr_table *mrt, struct sk_buff *pkt,
			      mifi_t mifi, int assert);
static void mr6_netlink_event(struct mr_table *mrt, struct mfc6_cache *mfc,
			      int cmd);
static void mrt6msg_netlink_event(struct mr_table *mrt, struct sk_buff *pkt);
static int ip6mr_rtm_dumproute(struct sk_buff *skb,
			       struct netlink_callback *cb);
static void mroute_clean_tables(struct mr_table *mrt, int flags);
static void ipmr_expire_process(struct timer_list *t);

#ifdef CONFIG_IPV6_MROUTE_MULTIPLE_TABLES
#define ip6mr_for_each_table(mrt, net) \
	list_for_each_entry_rcu(mrt, &net->ipv6.mr6_tables, list, \
				lockdep_rtnl_is_held() || \
				list_empty(&net->ipv6.mr6_tables))

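/* Return the table that follows 'mrt' in the per-netns list, or the first
 * table when 'mrt' is NULL. NULL marks the end of the iteration.
 */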
static struct mr_table *ip6mr_mr_table_iter(struct net *net,
					    struct mr_table *mrt)
{
	struct mr_table *ret;

	if (!mrt)
		ret = list_entry_rcu(net->ipv6.mr6_tables.next,
				     struct mr_table, list);
	else
		ret = list_entry_rcu(mrt->list.next,
				     struct mr_table, list);

	if (&ret->list == &net->ipv6.mr6_tables)
		return NULL;
	return ret;
}

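/* Look up a multicast routing table by id; returns NULL if no table matches. */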
static struct mr_table *ip6mr_get_table(struct net *net, u32 id)
{
	struct mr_table *mrt;

	ip6mr_for_each_table(mrt, net) {
		if (mrt->id == id)
			return mrt;
	}
	return NULL;
}

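/* Resolve the multicast routing table for a flow via the IP6MR fib rules. */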
static int ip6mr_fib_lookup(struct net *net, struct flowi6 *flp6,
			    struct mr_table **mrt)
{
	int err;
	struct ip6mr_result res;
	struct fib_lookup_arg arg = {
		.result = &res,
		.flags = FIB_LOOKUP_NOREF,
	};

	/* update flow if oif or iif point to device enslaved to l3mdev */
	l3mdev_update_flow(net, flowi6_to_flowi(flp6));

	err = fib_rules_lookup(net->ipv6.mr6_rules_ops,
			       flowi6_to_flowi(flp6), 0, &arg);
	if (err < 0)
		return err;
	*mrt = res.mrt;
	return 0;
}

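/* fib_rules action handler: map a matching rule to its mr_table, or translate
 * the rule's action into an error code.
 */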
static int ip6mr_rule_action(struct fib_rule *rule, struct flowi *flp,
			     int flags, struct fib_lookup_arg *arg)
{
	struct ip6mr_result *res = arg->result;
	struct mr_table *mrt;

	switch (rule->action) {
	case FR_ACT_TO_TBL:
		break;
	case FR_ACT_UNREACHABLE:
		return -ENETUNREACH;
	case FR_ACT_PROHIBIT:
		return -EACCES;
	case FR_ACT_BLACKHOLE:
	default:
		return -EINVAL;
	}

	arg->table = fib_rule_get_table(rule, arg);

	mrt = ip6mr_get_table(rule->fr_net, arg->table);
	if (!mrt)
		return -EAGAIN;
	res->mrt = mrt;
	return 0;
}

static int ip6mr_rule_match(struct fib_rule *rule, struct flowi *flp, int flags)
{
	return 1;
}

static const struct nla_policy ip6mr_rule_policy[FRA_MAX + 1] = {
	FRA_GENERIC_POLICY,
};

static int ip6mr_rule_configure(struct fib_rule *rule, struct sk_buff *skb,
				struct fib_rule_hdr *frh, struct nlattr **tb,
				struct netlink_ext_ack *extack)
{
	return 0;
}

static int ip6mr_rule_compare(struct fib_rule *rule, struct fib_rule_hdr *frh,
			      struct nlattr **tb)
{
	return 1;
}

static int ip6mr_rule_fill(struct fib_rule *rule, struct sk_buff *skb,
			   struct fib_rule_hdr *frh)
{
	frh->dst_len = 0;
	frh->src_len = 0;
	frh->tos = 0;
	return 0;
}

static const struct fib_rules_ops __net_initconst ip6mr_rules_ops_template = {
	.family = RTNL_FAMILY_IP6MR,
	.rule_size = sizeof(struct ip6mr_rule),
	.addr_size = sizeof(struct in6_addr),
	.action = ip6mr_rule_action,
	.match = ip6mr_rule_match,
	.configure = ip6mr_rule_configure,
	.compare = ip6mr_rule_compare,
	.fill = ip6mr_rule_fill,
	.nlgroup = RTNLGRP_IPV6_RULE,
	.policy = ip6mr_rule_policy,
	.owner = THIS_MODULE,
};

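/* Per-netns init: register the IP6MR rule ops, create the default table and
 * install a default (match-all) rule pointing at it.
 */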
static int __net_init ip6mr_rules_init(struct net *net)
{
	struct fib_rules_ops *ops;
	struct mr_table *mrt;
	int err;

	ops = fib_rules_register(&ip6mr_rules_ops_template, net);
	if (IS_ERR(ops))
		return PTR_ERR(ops);

	INIT_LIST_HEAD(&net->ipv6.mr6_tables);

	mrt = ip6mr_new_table(net, RT6_TABLE_DFLT);
	if (IS_ERR(mrt)) {
		err = PTR_ERR(mrt);
		goto err1;
	}

	err = fib_default_rule_add(ops, 0x7fff, RT6_TABLE_DFLT, 0);
	if (err < 0)
		goto err2;

	net->ipv6.mr6_rules_ops = ops;
	return 0;

err2:
	rtnl_lock();
	ip6mr_free_table(mrt);
	rtnl_unlock();
err1:
	fib_rules_unregister(ops);
	return err;
}

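/* Per-netns teardown: free every table and unregister the rule ops. */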
static void __net_exit ip6mr_rules_exit(struct net *net)
{
	struct mr_table *mrt, *next;

	rtnl_lock();
	list_for_each_entry_safe(mrt, next, &net->ipv6.mr6_tables, list) {
		list_del(&mrt->list);
		ip6mr_free_table(mrt);
	}
	fib_rules_unregister(net->ipv6.mr6_rules_ops);
	rtnl_unlock();
}

static int ip6mr_rules_dump(struct net *net, struct notifier_block *nb,
			    struct netlink_ext_ack *extack)
{
	return fib_rules_dump(net, nb, RTNL_FAMILY_IP6MR, extack);
}

static unsigned int ip6mr_rules_seq_read(struct net *net)
{
	return fib_rules_seq_read(net, RTNL_FAMILY_IP6MR);
}

bool ip6mr_rule_default(const struct fib_rule *rule)
{
	return fib_rule_matchall(rule) && rule->action == FR_ACT_TO_TBL &&
	       rule->table == RT6_TABLE_DFLT && !rule->l3mdev;
}
EXPORT_SYMBOL(ip6mr_rule_default);
#else
#define ip6mr_for_each_table(mrt, net) \
	for (mrt = net->ipv6.mrt6; mrt; mrt = NULL)

static struct mr_table *ip6mr_mr_table_iter(struct net *net,
					    struct mr_table *mrt)
{
	if (!mrt)
		return net->ipv6.mrt6;
	return NULL;
}

static struct mr_table *ip6mr_get_table(struct net *net, u32 id)
{
	return net->ipv6.mrt6;
}

static int ip6mr_fib_lookup(struct net *net, struct flowi6 *flp6,
			    struct mr_table **mrt)
{
	*mrt = net->ipv6.mrt6;
	return 0;
}

static int __net_init ip6mr_rules_init(struct net *net)
{
	struct mr_table *mrt;

	mrt = ip6mr_new_table(net, RT6_TABLE_DFLT);
	if (IS_ERR(mrt))
		return PTR_ERR(mrt);
	net->ipv6.mrt6 = mrt;
	return 0;
}

static void __net_exit ip6mr_rules_exit(struct net *net)
{
	rtnl_lock();
	ip6mr_free_table(net->ipv6.mrt6);
	net->ipv6.mrt6 = NULL;
	rtnl_unlock();
}

static int ip6mr_rules_dump(struct net *net, struct notifier_block *nb,
			    struct netlink_ext_ack *extack)
{
	return 0;
}

static unsigned int ip6mr_rules_seq_read(struct net *net)
{
	return 0;
}
#endif

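/* rhashtable compare callback: 0 means both multicast group and origin match. */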
static int ip6mr_hash_cmp(struct rhashtable_compare_arg *arg,
			  const void *ptr)
{
	const struct mfc6_cache_cmp_arg *cmparg = arg->key;
	struct mfc6_cache *c = (struct mfc6_cache *)ptr;

	return !ipv6_addr_equal(&c->mf6c_mcastgrp, &cmparg->mf6c_mcastgrp) ||
	       !ipv6_addr_equal(&c->mf6c_origin, &cmparg->mf6c_origin);
}

static const struct rhashtable_params ip6mr_rht_params = {
	.head_offset = offsetof(struct mr_mfc, mnode),
	.key_offset = offsetof(struct mfc6_cache, cmparg),
	.key_len = sizeof(struct mfc6_cache_cmp_arg),
	.nelem_hint = 3,
	.obj_cmpfn = ip6mr_hash_cmp,
	.automatic_shrinking = true,
};

static void ip6mr_new_table_set(struct mr_table *mrt,
				struct net *net)
{
#ifdef CONFIG_IPV6_MROUTE_MULTIPLE_TABLES
	list_add_tail_rcu(&mrt->list, &net->ipv6.mr6_tables);
#endif
}

static struct mfc6_cache_cmp_arg ip6mr_mr_table_ops_cmparg_any = {
	.mf6c_origin = IN6ADDR_ANY_INIT,
	.mf6c_mcastgrp = IN6ADDR_ANY_INIT,
};

static struct mr_table_ops ip6mr_mr_table_ops = {
	.rht_params = &ip6mr_rht_params,
	.cmparg_any = &ip6mr_mr_table_ops_cmparg_any,
};

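/* Return the table with the given id, creating it if it does not exist yet. */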
static struct mr_table *ip6mr_new_table(struct net *net, u32 id)
{
	struct mr_table *mrt;

	mrt = ip6mr_get_table(net, id);
	if (mrt)
		return mrt;

	return mr_table_alloc(net, id, &ip6mr_mr_table_ops,
			      ipmr_expire_process, ip6mr_new_table_set);
}

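/* Stop the expire timer, flush all MIF and MFC entries and release the table. */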
static void ip6mr_free_table(struct mr_table *mrt)
{
	del_timer_sync(&mrt->ipmr_expire_timer);
	mroute_clean_tables(mrt, MRT6_FLUSH_MIFS | MRT6_FLUSH_MIFS_STATIC |
				 MRT6_FLUSH_MFC | MRT6_FLUSH_MFC_STATIC);
	rhltable_destroy(&mrt->mfc_hash);
	kfree(mrt);
}

#ifdef CONFIG_PROC_FS
/* The /proc interfaces to multicast routing
 * /proc/ip6_mr_cache /proc/ip6_mr_vif
 */

static void *ip6mr_vif_seq_start(struct seq_file *seq, loff_t *pos)
	__acquires(mrt_lock)
{
	struct mr_vif_iter *iter = seq->private;
	struct net *net = seq_file_net(seq);
	struct mr_table *mrt;

	mrt = ip6mr_get_table(net, RT6_TABLE_DFLT);
	if (!mrt)
		return ERR_PTR(-ENOENT);

	iter->mrt = mrt;

	read_lock(&mrt_lock);
	return mr_vif_seq_start(seq, pos);
}

static void ip6mr_vif_seq_stop(struct seq_file *seq, void *v)
	__releases(mrt_lock)
{
	read_unlock(&mrt_lock);
}

static int ip6mr_vif_seq_show(struct seq_file *seq, void *v)
{
	struct mr_vif_iter *iter = seq->private;
	struct mr_table *mrt = iter->mrt;

	if (v == SEQ_START_TOKEN) {
		seq_puts(seq,
			 "Interface BytesIn PktsIn BytesOut PktsOut Flags\n");
	} else {
		const struct vif_device *vif = v;
		const char *name = vif->dev ? vif->dev->name : "none";

		seq_printf(seq,
			   "%2td %-10s %8ld %7ld %8ld %7ld %05X\n",
			   vif - mrt->vif_table,
			   name, vif->bytes_in, vif->pkt_in,
			   vif->bytes_out, vif->pkt_out,
			   vif->flags);
	}
	return 0;
}

static const struct seq_operations ip6mr_vif_seq_ops = {
	.start = ip6mr_vif_seq_start,
	.next = mr_vif_seq_next,
	.stop = ip6mr_vif_seq_stop,
	.show = ip6mr_vif_seq_show,
};

static void *ipmr_mfc_seq_start(struct seq_file *seq, loff_t *pos)
{
	struct net *net = seq_file_net(seq);
	struct mr_table *mrt;

	mrt = ip6mr_get_table(net, RT6_TABLE_DFLT);
	if (!mrt)
		return ERR_PTR(-ENOENT);

	return mr_mfc_seq_start(seq, pos, mrt, &mfc_unres_lock);
}

static int ipmr_mfc_seq_show(struct seq_file *seq, void *v)
{
	int n;

	if (v == SEQ_START_TOKEN) {
		seq_puts(seq,
			 "Group "
			 "Origin "
			 "Iif Pkts Bytes Wrong Oifs\n");
	} else {
		const struct mfc6_cache *mfc = v;
		const struct mr_mfc_iter *it = seq->private;
		struct mr_table *mrt = it->mrt;

		seq_printf(seq, "%pI6 %pI6 %-3hd",
			   &mfc->mf6c_mcastgrp, &mfc->mf6c_origin,
			   mfc->_c.mfc_parent);

		if (it->cache != &mrt->mfc_unres_queue) {
			seq_printf(seq, " %8lu %8lu %8lu",
				   mfc->_c.mfc_un.res.pkt,
				   mfc->_c.mfc_un.res.bytes,
				   mfc->_c.mfc_un.res.wrong_if);
			for (n = mfc->_c.mfc_un.res.minvif;
			     n < mfc->_c.mfc_un.res.maxvif; n++) {
				if (VIF_EXISTS(mrt, n) &&
				    mfc->_c.mfc_un.res.ttls[n] < 255)
					seq_printf(seq,
						   " %2d:%-3d", n,
						   mfc->_c.mfc_un.res.ttls[n]);
			}
		} else {
			/* unresolved mfc_caches don't contain
			 * pkt, bytes and wrong_if values
			 */
			seq_printf(seq, " %8lu %8lu %8lu", 0ul, 0ul, 0ul);
		}
		seq_putc(seq, '\n');
	}
	return 0;
}

static const struct seq_operations ipmr_mfc_seq_ops = {
	.start = ipmr_mfc_seq_start,
	.next = mr_mfc_seq_next,
	.stop = mr_mfc_seq_stop,
	.show = ipmr_mfc_seq_show,
};
#endif

#ifdef CONFIG_IPV6_PIMSM_V2

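/* Handle an incoming PIM register message: validate the header and checksum,
 * then decapsulate the inner multicast packet and feed it back through the
 * register interface of the matching table.
 */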
static int pim6_rcv(struct sk_buff *skb)
{
	struct pimreghdr *pim;
	struct ipv6hdr *encap;
	struct net_device *reg_dev = NULL;
	struct net *net = dev_net(skb->dev);
	struct mr_table *mrt;
	struct flowi6 fl6 = {
		.flowi6_iif = skb->dev->ifindex,
		.flowi6_mark = skb->mark,
	};
	int reg_vif_num;

	if (!pskb_may_pull(skb, sizeof(*pim) + sizeof(*encap)))
		goto drop;

	pim = (struct pimreghdr *)skb_transport_header(skb);
	if (pim->type != ((PIM_VERSION << 4) | PIM_TYPE_REGISTER) ||
	    (pim->flags & PIM_NULL_REGISTER) ||
	    (csum_ipv6_magic(&ipv6_hdr(skb)->saddr, &ipv6_hdr(skb)->daddr,
			     sizeof(*pim), IPPROTO_PIM,
			     csum_partial((void *)pim, sizeof(*pim), 0)) &&
	     csum_fold(skb_checksum(skb, 0, skb->len, 0))))
		goto drop;

	/* check if the inner packet is destined to mcast group */
	encap = (struct ipv6hdr *)(skb_transport_header(skb) +
				   sizeof(*pim));

	if (!ipv6_addr_is_multicast(&encap->daddr) ||
	    encap->payload_len == 0 ||
	    ntohs(encap->payload_len) + sizeof(*pim) > skb->len)
		goto drop;

	if (ip6mr_fib_lookup(net, &fl6, &mrt) < 0)
		goto drop;
	reg_vif_num = mrt->mroute_reg_vif_num;

	read_lock(&mrt_lock);
	if (reg_vif_num >= 0)
		reg_dev = mrt->vif_table[reg_vif_num].dev;
	if (reg_dev)
		dev_hold(reg_dev);
	read_unlock(&mrt_lock);

	if (!reg_dev)
		goto drop;

	skb->mac_header = skb->network_header;
	skb_pull(skb, (u8 *)encap - skb->data);
	skb_reset_network_header(skb);
	skb->protocol = htons(ETH_P_IPV6);
	skb->ip_summed = CHECKSUM_NONE;

	skb_tunnel_rx(skb, reg_dev, dev_net(reg_dev));

	netif_rx(skb);

	dev_put(reg_dev);
	return 0;
drop:
	kfree_skb(skb);
	return 0;
}

static const struct inet6_protocol pim6_protocol = {
	.handler = pim6_rcv,
};

/* Service routines creating virtual interfaces: PIMREG */

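/* Packets transmitted on the register interface are not sent on the wire;
 * they are reported to the user-space daemon as MRT6MSG_WHOLEPKT and then
 * freed.
 */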
static netdev_tx_t reg_vif_xmit(struct sk_buff *skb,
				struct net_device *dev)
{
	struct net *net = dev_net(dev);
	struct mr_table *mrt;
	struct flowi6 fl6 = {
		.flowi6_oif = dev->ifindex,
		.flowi6_iif = skb->skb_iif ? : LOOPBACK_IFINDEX,
		.flowi6_mark = skb->mark,
	};

	if (!pskb_inet_may_pull(skb))
		goto tx_err;

	if (ip6mr_fib_lookup(net, &fl6, &mrt) < 0)
		goto tx_err;

	read_lock(&mrt_lock);
	dev->stats.tx_bytes += skb->len;
	dev->stats.tx_packets++;
	ip6mr_cache_report(mrt, skb, mrt->mroute_reg_vif_num, MRT6MSG_WHOLEPKT);
	read_unlock(&mrt_lock);
	kfree_skb(skb);
	return NETDEV_TX_OK;

tx_err:
	dev->stats.tx_errors++;
	kfree_skb(skb);
	return NETDEV_TX_OK;
}

static int reg_vif_get_iflink(const struct net_device *dev)
{
	return 0;
}

static const struct net_device_ops reg_vif_netdev_ops = {
	.ndo_start_xmit = reg_vif_xmit,
	.ndo_get_iflink = reg_vif_get_iflink,
};

static void reg_vif_setup(struct net_device *dev)
{
	dev->type = ARPHRD_PIMREG;
	dev->mtu = 1500 - sizeof(struct ipv6hdr) - 8;
	dev->flags = IFF_NOARP;
	dev->netdev_ops = &reg_vif_netdev_ops;
	dev->needs_free_netdev = true;
	dev->features |= NETIF_F_NETNS_LOCAL;
}

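/* Allocate and register the pim6reg device used for PIM register traffic. */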
static struct net_device *ip6mr_reg_vif(struct net *net, struct mr_table *mrt)
{
	struct net_device *dev;
	char name[IFNAMSIZ];

	if (mrt->id == RT6_TABLE_DFLT)
		sprintf(name, "pim6reg");
	else
		sprintf(name, "pim6reg%u", mrt->id);

	dev = alloc_netdev(0, name, NET_NAME_UNKNOWN, reg_vif_setup);
	if (!dev)
		return NULL;

	dev_net_set(dev, net);

	if (register_netdevice(dev)) {
		free_netdev(dev);
		return NULL;
	}

	if (dev_open(dev, NULL))
		goto failure;

	dev_hold(dev);
	return dev;

failure:
	unregister_netdevice(dev);
	return NULL;
}
#endif

static int call_ip6mr_vif_entry_notifiers(struct net *net,
					  enum fib_event_type event_type,
					  struct vif_device *vif,
					  mifi_t vif_index, u32 tb_id)
{
	return mr_call_vif_notifiers(net, RTNL_FAMILY_IP6MR, event_type,
				     vif, vif_index, tb_id,
				     &net->ipv6.ipmr_seq);
}

static int call_ip6mr_mfc_entry_notifiers(struct net *net,
					  enum fib_event_type event_type,
					  struct mfc6_cache *mfc, u32 tb_id)
{
	return mr_call_mfc_notifiers(net, RTNL_FAMILY_IP6MR, event_type,
				     &mfc->_c, tb_id, &net->ipv6.ipmr_seq);
}

/* Delete a VIF entry */
static int mif6_delete(struct mr_table *mrt, int vifi, int notify,
		       struct list_head *head)
{
	struct vif_device *v;
	struct net_device *dev;
	struct inet6_dev *in6_dev;

	if (vifi < 0 || vifi >= mrt->maxvif)
		return -EADDRNOTAVAIL;

	v = &mrt->vif_table[vifi];

	if (VIF_EXISTS(mrt, vifi))
		call_ip6mr_vif_entry_notifiers(read_pnet(&mrt->net),
					       FIB_EVENT_VIF_DEL, v, vifi,
					       mrt->id);

	write_lock_bh(&mrt_lock);
	dev = v->dev;
	v->dev = NULL;

	if (!dev) {
		write_unlock_bh(&mrt_lock);
		return -EADDRNOTAVAIL;
	}

#ifdef CONFIG_IPV6_PIMSM_V2
	if (vifi == mrt->mroute_reg_vif_num)
		mrt->mroute_reg_vif_num = -1;
#endif

	if (vifi + 1 == mrt->maxvif) {
		int tmp;
		for (tmp = vifi - 1; tmp >= 0; tmp--) {
			if (VIF_EXISTS(mrt, tmp))
				break;
		}
		mrt->maxvif = tmp + 1;
	}

	write_unlock_bh(&mrt_lock);

	dev_set_allmulti(dev, -1);

	in6_dev = __in6_dev_get(dev);
	if (in6_dev) {
		in6_dev->cnf.mc_forwarding--;
		inet6_netconf_notify_devconf(dev_net(dev), RTM_NEWNETCONF,
					     NETCONFA_MC_FORWARDING,
					     dev->ifindex, &in6_dev->cnf);
	}

	if ((v->flags & MIFF_REGISTER) && !notify)
		unregister_netdevice_queue(dev, head);

	dev_put(dev);
	return 0;
}

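/* Free an mfc6_cache entry once an RCU grace period has elapsed. */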
static inline void ip6mr_cache_free_rcu(struct rcu_head *head)
{
	struct mr_mfc *c = container_of(head, struct mr_mfc, rcu);

	kmem_cache_free(mrt_cachep, (struct mfc6_cache *)c);
}

static inline void ip6mr_cache_free(struct mfc6_cache *c)
{
	call_rcu(&c->_c.rcu, ip6mr_cache_free_rcu);
}

/* Destroy an unresolved cache entry, killing queued skbs
 * and reporting an error to netlink readers.
 */

static void ip6mr_destroy_unres(struct mr_table *mrt, struct mfc6_cache *c)
{
	struct net *net = read_pnet(&mrt->net);
	struct sk_buff *skb;

	atomic_dec(&mrt->cache_resolve_queue_len);

	while ((skb = skb_dequeue(&c->_c.mfc_un.unres.unresolved)) != NULL) {
		if (ipv6_hdr(skb)->version == 0) {
			struct nlmsghdr *nlh = skb_pull(skb,
							sizeof(struct ipv6hdr));
			nlh->nlmsg_type = NLMSG_ERROR;
			nlh->nlmsg_len = nlmsg_msg_size(sizeof(struct nlmsgerr));
			skb_trim(skb, nlh->nlmsg_len);
			((struct nlmsgerr *)nlmsg_data(nlh))->error = -ETIMEDOUT;
			rtnl_unicast(skb, net, NETLINK_CB(skb).portid);
		} else
			kfree_skb(skb);
	}

	ip6mr_cache_free(c);
}


/* Timer handler for the queue of unresolved entries. */

static void ipmr_do_expire_process(struct mr_table *mrt)
{
	unsigned long now = jiffies;
	unsigned long expires = 10 * HZ;
	struct mr_mfc *c, *next;

	list_for_each_entry_safe(c, next, &mrt->mfc_unres_queue, list) {
		if (time_after(c->mfc_un.unres.expires, now)) {
			/* not yet... */
			unsigned long interval = c->mfc_un.unres.expires - now;
			if (interval < expires)
				expires = interval;
			continue;
		}

		list_del(&c->list);
		mr6_netlink_event(mrt, (struct mfc6_cache *)c, RTM_DELROUTE);
		ip6mr_destroy_unres(mrt, (struct mfc6_cache *)c);
	}

	if (!list_empty(&mrt->mfc_unres_queue))
		mod_timer(&mrt->ipmr_expire_timer, jiffies + expires);
}

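/* Timer callback: re-arm shortly if the unresolved queue is busy, otherwise
 * expire stale entries.
 */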
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 822) static void ipmr_expire_process(struct timer_list *t)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 823) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 824) struct mr_table *mrt = from_timer(mrt, t, ipmr_expire_timer);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 825)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 826) if (!spin_trylock(&mfc_unres_lock)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 827) mod_timer(&mrt->ipmr_expire_timer, jiffies + 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 828) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 829) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 830)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 831) if (!list_empty(&mrt->mfc_unres_queue))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 832) ipmr_do_expire_process(mrt);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 833)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 834) spin_unlock(&mfc_unres_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 835) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 836)
/* Fill the oifs list. Called with mrt_lock held for writing. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 839) static void ip6mr_update_thresholds(struct mr_table *mrt,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 840) struct mr_mfc *cache,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 841) unsigned char *ttls)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 842) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 843) int vifi;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 844)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 845) cache->mfc_un.res.minvif = MAXMIFS;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 846) cache->mfc_un.res.maxvif = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 847) memset(cache->mfc_un.res.ttls, 255, MAXMIFS);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 848)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 849) for (vifi = 0; vifi < mrt->maxvif; vifi++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 850) if (VIF_EXISTS(mrt, vifi) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 851) ttls[vifi] && ttls[vifi] < 255) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 852) cache->mfc_un.res.ttls[vifi] = ttls[vifi];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 853) if (cache->mfc_un.res.minvif > vifi)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 854) cache->mfc_un.res.minvif = vifi;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 855) if (cache->mfc_un.res.maxvif <= vifi)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 856) cache->mfc_un.res.maxvif = vifi + 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 857) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 858) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 859) cache->mfc_un.res.lastuse = jiffies;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 860) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 861)
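/* Add a multicast interface (MIF). Called under RTNL from
 * ip6_mroute_setsockopt(MRT6_ADD_MIF).
 */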
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 862) static int mif6_add(struct net *net, struct mr_table *mrt,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 863) struct mif6ctl *vifc, int mrtsock)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 864) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 865) int vifi = vifc->mif6c_mifi;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 866) struct vif_device *v = &mrt->vif_table[vifi];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 867) struct net_device *dev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 868) struct inet6_dev *in6_dev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 869) int err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 870)
	/* Is vif busy? */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 872) if (VIF_EXISTS(mrt, vifi))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 873) return -EADDRINUSE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 874)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 875) switch (vifc->mif6c_flags) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 876) #ifdef CONFIG_IPV6_PIMSM_V2
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 877) case MIFF_REGISTER:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 878) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 879) * Special Purpose VIF in PIM
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 880) * All the packets will be sent to the daemon
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 881) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 882) if (mrt->mroute_reg_vif_num >= 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 883) return -EADDRINUSE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 884) dev = ip6mr_reg_vif(net, mrt);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 885) if (!dev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 886) return -ENOBUFS;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 887) err = dev_set_allmulti(dev, 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 888) if (err) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 889) unregister_netdevice(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 890) dev_put(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 891) return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 892) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 893) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 894) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 895) case 0:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 896) dev = dev_get_by_index(net, vifc->mif6c_pifi);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 897) if (!dev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 898) return -EADDRNOTAVAIL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 899) err = dev_set_allmulti(dev, 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 900) if (err) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 901) dev_put(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 902) return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 903) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 904) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 905) default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 906) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 907) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 908)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 909) in6_dev = __in6_dev_get(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 910) if (in6_dev) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 911) in6_dev->cnf.mc_forwarding++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 912) inet6_netconf_notify_devconf(dev_net(dev), RTM_NEWNETCONF,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 913) NETCONFA_MC_FORWARDING,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 914) dev->ifindex, &in6_dev->cnf);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 915) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 916)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 917) /* Fill in the VIF structures */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 918) vif_device_init(v, dev, vifc->vifc_rate_limit, vifc->vifc_threshold,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 919) vifc->mif6c_flags | (!mrtsock ? VIFF_STATIC : 0),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 920) MIFF_REGISTER);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 921)
	/* Finish the update by writing the critical data under the lock */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 923) write_lock_bh(&mrt_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 924) v->dev = dev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 925) #ifdef CONFIG_IPV6_PIMSM_V2
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 926) if (v->flags & MIFF_REGISTER)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 927) mrt->mroute_reg_vif_num = vifi;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 928) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 929) if (vifi + 1 > mrt->maxvif)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 930) mrt->maxvif = vifi + 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 931) write_unlock_bh(&mrt_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 932) call_ip6mr_vif_entry_notifiers(net, FIB_EVENT_VIF_ADD,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 933) v, vifi, mrt->id);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 934) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 935) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 936)
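/* Look for an exact (S,G) entry */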
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 937) static struct mfc6_cache *ip6mr_cache_find(struct mr_table *mrt,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 938) const struct in6_addr *origin,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 939) const struct in6_addr *mcastgrp)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 940) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 941) struct mfc6_cache_cmp_arg arg = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 942) .mf6c_origin = *origin,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 943) .mf6c_mcastgrp = *mcastgrp,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 944) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 945)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 946) return mr_mfc_find(mrt, &arg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 947) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 948)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 949) /* Look for a (*,G) entry */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 950) static struct mfc6_cache *ip6mr_cache_find_any(struct mr_table *mrt,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 951) struct in6_addr *mcastgrp,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 952) mifi_t mifi)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 953) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 954) struct mfc6_cache_cmp_arg arg = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 955) .mf6c_origin = in6addr_any,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 956) .mf6c_mcastgrp = *mcastgrp,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 957) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 958)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 959) if (ipv6_addr_any(mcastgrp))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 960) return mr_mfc_find_any_parent(mrt, mifi);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 961) return mr_mfc_find_any(mrt, mifi, &arg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 962) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 963)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 964) /* Look for a (S,G,iif) entry if parent != -1 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 965) static struct mfc6_cache *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 966) ip6mr_cache_find_parent(struct mr_table *mrt,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 967) const struct in6_addr *origin,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 968) const struct in6_addr *mcastgrp,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 969) int parent)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 970) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 971) struct mfc6_cache_cmp_arg arg = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 972) .mf6c_origin = *origin,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 973) .mf6c_mcastgrp = *mcastgrp,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 974) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 975)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 976) return mr_mfc_find_parent(mrt, &arg, parent);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 977) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 978)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 979) /* Allocate a multicast cache entry */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 980) static struct mfc6_cache *ip6mr_cache_alloc(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 981) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 982) struct mfc6_cache *c = kmem_cache_zalloc(mrt_cachep, GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 983) if (!c)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 984) return NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 985) c->_c.mfc_un.res.last_assert = jiffies - MFC_ASSERT_THRESH - 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 986) c->_c.mfc_un.res.minvif = MAXMIFS;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 987) c->_c.free = ip6mr_cache_free_rcu;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 988) refcount_set(&c->_c.mfc_un.res.refcount, 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 989) return c;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 990) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 991)
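/* Allocate an unresolved cache entry; packets are queued on it until
 * user space resolves the route or the entry expires (10 seconds).
 */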
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 992) static struct mfc6_cache *ip6mr_cache_alloc_unres(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 993) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 994) struct mfc6_cache *c = kmem_cache_zalloc(mrt_cachep, GFP_ATOMIC);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 995) if (!c)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 996) return NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 997) skb_queue_head_init(&c->_c.mfc_un.unres.unresolved);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 998) c->_c.mfc_un.unres.expires = jiffies + 10 * HZ;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 999) return c;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1000) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1001)
/* A cache entry has gone from the unresolved queue into the resolved state. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1006) static void ip6mr_cache_resolve(struct net *net, struct mr_table *mrt,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1007) struct mfc6_cache *uc, struct mfc6_cache *c)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1008) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1009) struct sk_buff *skb;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1010)
	/* Play the pending entries through our router */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1015) while ((skb = __skb_dequeue(&uc->_c.mfc_un.unres.unresolved))) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1016) if (ipv6_hdr(skb)->version == 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1017) struct nlmsghdr *nlh = skb_pull(skb,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1018) sizeof(struct ipv6hdr));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1019)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1020) if (mr_fill_mroute(mrt, skb, &c->_c,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1021) nlmsg_data(nlh)) > 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1022) nlh->nlmsg_len = skb_tail_pointer(skb) - (u8 *)nlh;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1023) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1024) nlh->nlmsg_type = NLMSG_ERROR;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1025) nlh->nlmsg_len = nlmsg_msg_size(sizeof(struct nlmsgerr));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1026) skb_trim(skb, nlh->nlmsg_len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1027) ((struct nlmsgerr *)nlmsg_data(nlh))->error = -EMSGSIZE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1028) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1029) rtnl_unicast(skb, net, NETLINK_CB(skb).portid);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1030) } else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1031) ip6_mr_forward(net, mrt, skb->dev, skb, c);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1032) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1033) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1034)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1035) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1036) * Bounce a cache query up to pim6sd and netlink.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1037) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1038) * Called under mrt_lock.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1039) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1040)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1041) static int ip6mr_cache_report(struct mr_table *mrt, struct sk_buff *pkt,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1042) mifi_t mifi, int assert)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1043) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1044) struct sock *mroute6_sk;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1045) struct sk_buff *skb;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1046) struct mrt6msg *msg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1047) int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1048)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1049) #ifdef CONFIG_IPV6_PIMSM_V2
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1050) if (assert == MRT6MSG_WHOLEPKT)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1051) skb = skb_realloc_headroom(pkt, -skb_network_offset(pkt)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1052) +sizeof(*msg));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1053) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1054) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1055) skb = alloc_skb(sizeof(struct ipv6hdr) + sizeof(*msg), GFP_ATOMIC);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1056)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1057) if (!skb)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1058) return -ENOBUFS;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1059)
	/* Internal messages presumably do not require checksums */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1062)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1063) skb->ip_summed = CHECKSUM_UNNECESSARY;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1064)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1065) #ifdef CONFIG_IPV6_PIMSM_V2
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1066) if (assert == MRT6MSG_WHOLEPKT) {
		/* Ugly, but we have no choice with this interface.
		 * Duplicate the old header, fix the length, etc.
		 * All this only to mangle msg->im6_msgtype and
		 * to set msg->im6_mbz to "mbz" :-)
		 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1072) skb_push(skb, -skb_network_offset(pkt));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1073)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1074) skb_push(skb, sizeof(*msg));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1075) skb_reset_transport_header(skb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1076) msg = (struct mrt6msg *)skb_transport_header(skb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1077) msg->im6_mbz = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1078) msg->im6_msgtype = MRT6MSG_WHOLEPKT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1079) msg->im6_mif = mrt->mroute_reg_vif_num;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1080) msg->im6_pad = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1081) msg->im6_src = ipv6_hdr(pkt)->saddr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1082) msg->im6_dst = ipv6_hdr(pkt)->daddr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1083)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1084) skb->ip_summed = CHECKSUM_UNNECESSARY;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1085) } else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1086) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1087) {
		/* Copy the IPv6 header */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1092) skb_put(skb, sizeof(struct ipv6hdr));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1093) skb_reset_network_header(skb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1094) skb_copy_to_linear_data(skb, ipv6_hdr(pkt), sizeof(struct ipv6hdr));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1095)
		/* Add our header */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1099) skb_put(skb, sizeof(*msg));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1100) skb_reset_transport_header(skb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1101) msg = (struct mrt6msg *)skb_transport_header(skb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1102)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1103) msg->im6_mbz = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1104) msg->im6_msgtype = assert;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1105) msg->im6_mif = mifi;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1106) msg->im6_pad = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1107) msg->im6_src = ipv6_hdr(pkt)->saddr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1108) msg->im6_dst = ipv6_hdr(pkt)->daddr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1109)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1110) skb_dst_set(skb, dst_clone(skb_dst(pkt)));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1111) skb->ip_summed = CHECKSUM_UNNECESSARY;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1112) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1113)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1114) rcu_read_lock();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1115) mroute6_sk = rcu_dereference(mrt->mroute_sk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1116) if (!mroute6_sk) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1117) rcu_read_unlock();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1118) kfree_skb(skb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1119) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1120) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1121)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1122) mrt6msg_netlink_event(mrt, skb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1123)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1124) /* Deliver to user space multicast routing algorithms */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1125) ret = sock_queue_rcv_skb(mroute6_sk, skb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1126) rcu_read_unlock();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1127) if (ret < 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1128) net_warn_ratelimited("mroute6: pending queue full, dropping entries\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1129) kfree_skb(skb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1130) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1131)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1132) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1133) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1134)
/* Queue a packet for resolution. The unresolved cache entry is
 * created or looked up under mfc_unres_lock.
 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1136) static int ip6mr_cache_unresolved(struct mr_table *mrt, mifi_t mifi,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1137) struct sk_buff *skb, struct net_device *dev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1138) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1139) struct mfc6_cache *c;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1140) bool found = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1141) int err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1142)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1143) spin_lock_bh(&mfc_unres_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1144) list_for_each_entry(c, &mrt->mfc_unres_queue, _c.list) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1145) if (ipv6_addr_equal(&c->mf6c_mcastgrp, &ipv6_hdr(skb)->daddr) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1146) ipv6_addr_equal(&c->mf6c_origin, &ipv6_hdr(skb)->saddr)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1147) found = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1148) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1149) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1150) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1151)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1152) if (!found) {
		/* Create a new entry if allowable */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1157) c = ip6mr_cache_alloc_unres();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1158) if (!c) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1159) spin_unlock_bh(&mfc_unres_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1160)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1161) kfree_skb(skb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1162) return -ENOBUFS;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1163) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1164)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1165) /* Fill in the new cache entry */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1166) c->_c.mfc_parent = -1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1167) c->mf6c_origin = ipv6_hdr(skb)->saddr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1168) c->mf6c_mcastgrp = ipv6_hdr(skb)->daddr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1169)
		/* Report the first packet to pim6sd (MRT6MSG_NOCACHE) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1173) err = ip6mr_cache_report(mrt, skb, mifi, MRT6MSG_NOCACHE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1174) if (err < 0) {
			/* If the report failed throw the cache entry
			 * out - Brad Parker
			 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1178) spin_unlock_bh(&mfc_unres_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1179)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1180) ip6mr_cache_free(c);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1181) kfree_skb(skb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1182) return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1183) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1184)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1185) atomic_inc(&mrt->cache_resolve_queue_len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1186) list_add(&c->_c.list, &mrt->mfc_unres_queue);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1187) mr6_netlink_event(mrt, c, RTM_NEWROUTE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1188)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1189) ipmr_do_expire_process(mrt);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1190) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1191)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1192) /* See if we can append the packet */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1193) if (c->_c.mfc_un.unres.unresolved.qlen > 3) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1194) kfree_skb(skb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1195) err = -ENOBUFS;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1196) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1197) if (dev) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1198) skb->dev = dev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1199) skb->skb_iif = dev->ifindex;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1200) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1201) skb_queue_tail(&c->_c.mfc_un.unres.unresolved, skb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1202) err = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1203) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1204)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1205) spin_unlock_bh(&mfc_unres_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1206) return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1207) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1208)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1209) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1210) * MFC6 cache manipulation by user space
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1211) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1212)
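/* Delete an (S,G[,iif]) entry on behalf of user space (MRT6_DEL_MFC) */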
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1213) static int ip6mr_mfc_delete(struct mr_table *mrt, struct mf6cctl *mfc,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1214) int parent)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1215) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1216) struct mfc6_cache *c;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1217)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1218) /* The entries are added/deleted only under RTNL */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1219) rcu_read_lock();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1220) c = ip6mr_cache_find_parent(mrt, &mfc->mf6cc_origin.sin6_addr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1221) &mfc->mf6cc_mcastgrp.sin6_addr, parent);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1222) rcu_read_unlock();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1223) if (!c)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1224) return -ENOENT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1225) rhltable_remove(&mrt->mfc_hash, &c->_c.mnode, ip6mr_rht_params);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1226) list_del_rcu(&c->_c.list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1227)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1228) call_ip6mr_mfc_entry_notifiers(read_pnet(&mrt->net),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1229) FIB_EVENT_ENTRY_DEL, c, mrt->id);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1230) mr6_netlink_event(mrt, c, RTM_DELROUTE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1231) mr_cache_put(&c->_c);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1232) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1233) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1234)
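/* Netdevice notifier: when a device is unregistered, delete any MIF
 * bound to it in every multicast routing table.
 */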
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1235) static int ip6mr_device_event(struct notifier_block *this,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1236) unsigned long event, void *ptr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1237) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1238) struct net_device *dev = netdev_notifier_info_to_dev(ptr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1239) struct net *net = dev_net(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1240) struct mr_table *mrt;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1241) struct vif_device *v;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1242) int ct;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1243)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1244) if (event != NETDEV_UNREGISTER)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1245) return NOTIFY_DONE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1246)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1247) ip6mr_for_each_table(mrt, net) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1248) v = &mrt->vif_table[0];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1249) for (ct = 0; ct < mrt->maxvif; ct++, v++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1250) if (v->dev == dev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1251) mif6_delete(mrt, ct, 1, NULL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1252) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1253) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1254)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1255) return NOTIFY_DONE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1256) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1257)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1258) static unsigned int ip6mr_seq_read(struct net *net)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1259) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1260) ASSERT_RTNL();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1261)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1262) return net->ipv6.ipmr_seq + ip6mr_rules_seq_read(net);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1263) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1264)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1265) static int ip6mr_dump(struct net *net, struct notifier_block *nb,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1266) struct netlink_ext_ack *extack)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1267) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1268) return mr_dump(net, nb, RTNL_FAMILY_IP6MR, ip6mr_rules_dump,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1269) ip6mr_mr_table_iter, &mrt_lock, extack);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1270) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1271)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1272) static struct notifier_block ip6_mr_notifier = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1273) .notifier_call = ip6mr_device_event
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1274) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1275)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1276) static const struct fib_notifier_ops ip6mr_notifier_ops_template = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1277) .family = RTNL_FAMILY_IP6MR,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1278) .fib_seq_read = ip6mr_seq_read,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1279) .fib_dump = ip6mr_dump,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1280) .owner = THIS_MODULE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1281) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1282)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1283) static int __net_init ip6mr_notifier_init(struct net *net)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1284) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1285) struct fib_notifier_ops *ops;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1286)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1287) net->ipv6.ipmr_seq = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1288)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1289) ops = fib_notifier_ops_register(&ip6mr_notifier_ops_template, net);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1290) if (IS_ERR(ops))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1291) return PTR_ERR(ops);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1292)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1293) net->ipv6.ip6mr_notifier_ops = ops;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1294)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1295) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1296) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1297)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1298) static void __net_exit ip6mr_notifier_exit(struct net *net)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1299) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1300) fib_notifier_ops_unregister(net->ipv6.ip6mr_notifier_ops);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1301) net->ipv6.ip6mr_notifier_ops = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1302) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1303)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1304) /* Setup for IP multicast routing */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1305) static int __net_init ip6mr_net_init(struct net *net)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1306) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1307) int err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1308)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1309) err = ip6mr_notifier_init(net);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1310) if (err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1311) return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1312)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1313) err = ip6mr_rules_init(net);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1314) if (err < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1315) goto ip6mr_rules_fail;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1316)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1317) #ifdef CONFIG_PROC_FS
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1318) err = -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1319) if (!proc_create_net("ip6_mr_vif", 0, net->proc_net, &ip6mr_vif_seq_ops,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1320) sizeof(struct mr_vif_iter)))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1321) goto proc_vif_fail;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1322) if (!proc_create_net("ip6_mr_cache", 0, net->proc_net, &ipmr_mfc_seq_ops,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1323) sizeof(struct mr_mfc_iter)))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1324) goto proc_cache_fail;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1325) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1326)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1327) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1328)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1329) #ifdef CONFIG_PROC_FS
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1330) proc_cache_fail:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1331) remove_proc_entry("ip6_mr_vif", net->proc_net);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1332) proc_vif_fail:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1333) ip6mr_rules_exit(net);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1334) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1335) ip6mr_rules_fail:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1336) ip6mr_notifier_exit(net);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1337) return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1338) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1339)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1340) static void __net_exit ip6mr_net_exit(struct net *net)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1341) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1342) #ifdef CONFIG_PROC_FS
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1343) remove_proc_entry("ip6_mr_cache", net->proc_net);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1344) remove_proc_entry("ip6_mr_vif", net->proc_net);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1345) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1346) ip6mr_rules_exit(net);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1347) ip6mr_notifier_exit(net);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1348) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1349)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1350) static struct pernet_operations ip6mr_net_ops = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1351) .init = ip6mr_net_init,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1352) .exit = ip6mr_net_exit,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1353) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1354)
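/* Subsystem init: set up the MFC slab cache, per-netns state, the
 * netdevice notifier, the PIM protocol handler and the RTNL dump hook.
 */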
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1355) int __init ip6_mr_init(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1356) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1357) int err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1358)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1359) mrt_cachep = kmem_cache_create("ip6_mrt_cache",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1360) sizeof(struct mfc6_cache),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1361) 0, SLAB_HWCACHE_ALIGN,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1362) NULL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1363) if (!mrt_cachep)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1364) return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1365)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1366) err = register_pernet_subsys(&ip6mr_net_ops);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1367) if (err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1368) goto reg_pernet_fail;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1369)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1370) err = register_netdevice_notifier(&ip6_mr_notifier);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1371) if (err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1372) goto reg_notif_fail;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1373) #ifdef CONFIG_IPV6_PIMSM_V2
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1374) if (inet6_add_protocol(&pim6_protocol, IPPROTO_PIM) < 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1375) pr_err("%s: can't add PIM protocol\n", __func__);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1376) err = -EAGAIN;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1377) goto add_proto_fail;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1378) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1379) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1380) err = rtnl_register_module(THIS_MODULE, RTNL_FAMILY_IP6MR, RTM_GETROUTE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1381) NULL, ip6mr_rtm_dumproute, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1382) if (err == 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1383) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1384)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1385) #ifdef CONFIG_IPV6_PIMSM_V2
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1386) inet6_del_protocol(&pim6_protocol, IPPROTO_PIM);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1387) add_proto_fail:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1388) unregister_netdevice_notifier(&ip6_mr_notifier);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1389) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1390) reg_notif_fail:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1391) unregister_pernet_subsys(&ip6mr_net_ops);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1392) reg_pernet_fail:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1393) kmem_cache_destroy(mrt_cachep);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1394) return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1395) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1396)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1397) void ip6_mr_cleanup(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1398) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1399) rtnl_unregister(RTNL_FAMILY_IP6MR, RTM_GETROUTE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1400) #ifdef CONFIG_IPV6_PIMSM_V2
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1401) inet6_del_protocol(&pim6_protocol, IPPROTO_PIM);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1402) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1403) unregister_netdevice_notifier(&ip6_mr_notifier);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1404) unregister_pernet_subsys(&ip6mr_net_ops);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1405) kmem_cache_destroy(mrt_cachep);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1406) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1407)
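/* Add or replace an (S,G[,iif]) entry (MRT6_ADD_MFC). If a matching
 * unresolved entry exists, its queued packets are replayed through
 * ip6mr_cache_resolve().
 */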
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1408) static int ip6mr_mfc_add(struct net *net, struct mr_table *mrt,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1409) struct mf6cctl *mfc, int mrtsock, int parent)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1410) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1411) unsigned char ttls[MAXMIFS];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1412) struct mfc6_cache *uc, *c;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1413) struct mr_mfc *_uc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1414) bool found;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1415) int i, err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1416)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1417) if (mfc->mf6cc_parent >= MAXMIFS)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1418) return -ENFILE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1419)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1420) memset(ttls, 255, MAXMIFS);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1421) for (i = 0; i < MAXMIFS; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1422) if (IF_ISSET(i, &mfc->mf6cc_ifset))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1423) ttls[i] = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1424) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1425)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1426) /* The entries are added/deleted only under RTNL */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1427) rcu_read_lock();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1428) c = ip6mr_cache_find_parent(mrt, &mfc->mf6cc_origin.sin6_addr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1429) &mfc->mf6cc_mcastgrp.sin6_addr, parent);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1430) rcu_read_unlock();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1431) if (c) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1432) write_lock_bh(&mrt_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1433) c->_c.mfc_parent = mfc->mf6cc_parent;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1434) ip6mr_update_thresholds(mrt, &c->_c, ttls);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1435) if (!mrtsock)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1436) c->_c.mfc_flags |= MFC_STATIC;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1437) write_unlock_bh(&mrt_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1438) call_ip6mr_mfc_entry_notifiers(net, FIB_EVENT_ENTRY_REPLACE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1439) c, mrt->id);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1440) mr6_netlink_event(mrt, c, RTM_NEWROUTE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1441) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1442) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1443)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1444) if (!ipv6_addr_any(&mfc->mf6cc_mcastgrp.sin6_addr) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1445) !ipv6_addr_is_multicast(&mfc->mf6cc_mcastgrp.sin6_addr))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1446) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1447)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1448) c = ip6mr_cache_alloc();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1449) if (!c)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1450) return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1451)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1452) c->mf6c_origin = mfc->mf6cc_origin.sin6_addr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1453) c->mf6c_mcastgrp = mfc->mf6cc_mcastgrp.sin6_addr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1454) c->_c.mfc_parent = mfc->mf6cc_parent;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1455) ip6mr_update_thresholds(mrt, &c->_c, ttls);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1456) if (!mrtsock)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1457) c->_c.mfc_flags |= MFC_STATIC;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1458)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1459) err = rhltable_insert_key(&mrt->mfc_hash, &c->cmparg, &c->_c.mnode,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1460) ip6mr_rht_params);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1461) if (err) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1462) pr_err("ip6mr: rhtable insert error %d\n", err);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1463) ip6mr_cache_free(c);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1464) return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1465) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1466) list_add_tail_rcu(&c->_c.list, &mrt->mfc_cache_list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1467)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1468) /* Check to see if we resolved a queued list. If so we
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1469) * need to send on the frames and tidy up.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1470) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1471) found = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1472) spin_lock_bh(&mfc_unres_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1473) list_for_each_entry(_uc, &mrt->mfc_unres_queue, list) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1474) uc = (struct mfc6_cache *)_uc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1475) if (ipv6_addr_equal(&uc->mf6c_origin, &c->mf6c_origin) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1476) ipv6_addr_equal(&uc->mf6c_mcastgrp, &c->mf6c_mcastgrp)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1477) list_del(&_uc->list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1478) atomic_dec(&mrt->cache_resolve_queue_len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1479) found = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1480) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1481) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1482) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1483) if (list_empty(&mrt->mfc_unres_queue))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1484) del_timer(&mrt->ipmr_expire_timer);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1485) spin_unlock_bh(&mfc_unres_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1486)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1487) if (found) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1488) ip6mr_cache_resolve(net, mrt, uc, c);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1489) ip6mr_cache_free(uc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1490) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1491) call_ip6mr_mfc_entry_notifiers(net, FIB_EVENT_ENTRY_ADD,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1492) c, mrt->id);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1493) mr6_netlink_event(mrt, c, RTM_NEWROUTE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1494) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1495) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1496)
/* Close the multicast socket, and clear the vif tables etc. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1501) static void mroute_clean_tables(struct mr_table *mrt, int flags)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1502) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1503) struct mr_mfc *c, *tmp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1504) LIST_HEAD(list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1505) int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1506)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1507) /* Shut down all active vif entries */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1508) if (flags & (MRT6_FLUSH_MIFS | MRT6_FLUSH_MIFS_STATIC)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1509) for (i = 0; i < mrt->maxvif; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1510) if (((mrt->vif_table[i].flags & VIFF_STATIC) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1511) !(flags & MRT6_FLUSH_MIFS_STATIC)) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1512) (!(mrt->vif_table[i].flags & VIFF_STATIC) && !(flags & MRT6_FLUSH_MIFS)))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1513) continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1514) mif6_delete(mrt, i, 0, &list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1515) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1516) unregister_netdevice_many(&list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1517) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1518)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1519) /* Wipe the cache */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1520) if (flags & (MRT6_FLUSH_MFC | MRT6_FLUSH_MFC_STATIC)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1521) list_for_each_entry_safe(c, tmp, &mrt->mfc_cache_list, list) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1522) if (((c->mfc_flags & MFC_STATIC) && !(flags & MRT6_FLUSH_MFC_STATIC)) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1523) (!(c->mfc_flags & MFC_STATIC) && !(flags & MRT6_FLUSH_MFC)))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1524) continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1525) rhltable_remove(&mrt->mfc_hash, &c->mnode, ip6mr_rht_params);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1526) list_del_rcu(&c->list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1527) call_ip6mr_mfc_entry_notifiers(read_pnet(&mrt->net),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1528) FIB_EVENT_ENTRY_DEL,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1529) (struct mfc6_cache *)c, mrt->id);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1530) mr6_netlink_event(mrt, (struct mfc6_cache *)c, RTM_DELROUTE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1531) mr_cache_put(c);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1532) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1533) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1534)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1535) if (flags & MRT6_FLUSH_MFC) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1536) if (atomic_read(&mrt->cache_resolve_queue_len) != 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1537) spin_lock_bh(&mfc_unres_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1538) list_for_each_entry_safe(c, tmp, &mrt->mfc_unres_queue, list) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1539) list_del(&c->list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1540) mr6_netlink_event(mrt, (struct mfc6_cache *)c,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1541) RTM_DELROUTE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1542) ip6mr_destroy_unres(mrt, (struct mfc6_cache *)c);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1543) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1544) spin_unlock_bh(&mfc_unres_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1545) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1546) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1547) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1548)
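/* Register sk as the multicast routing daemon socket for this table
 * (MRT6_INIT); only one such socket may be active per table.
 */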
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1549) static int ip6mr_sk_init(struct mr_table *mrt, struct sock *sk)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1550) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1551) int err = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1552) struct net *net = sock_net(sk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1553)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1554) rtnl_lock();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1555) write_lock_bh(&mrt_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1556) if (rtnl_dereference(mrt->mroute_sk)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1557) err = -EADDRINUSE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1558) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1559) rcu_assign_pointer(mrt->mroute_sk, sk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1560) sock_set_flag(sk, SOCK_RCU_FREE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1561) net->ipv6.devconf_all->mc_forwarding++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1562) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1563) write_unlock_bh(&mrt_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1564)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1565) if (!err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1566) inet6_netconf_notify_devconf(net, RTM_NEWNETCONF,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1567) NETCONFA_MC_FORWARDING,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1568) NETCONFA_IFINDEX_ALL,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1569) net->ipv6.devconf_all);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1570) rtnl_unlock();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1571)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1572) return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1573) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1574)
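/* Tear down the daemon socket state (MRT6_DONE or socket close): clear
 * mroute_sk, drop mc_forwarding and flush the non-static MIFs and MFC
 * entries.
 */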
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1575) int ip6mr_sk_done(struct sock *sk)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1576) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1577) int err = -EACCES;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1578) struct net *net = sock_net(sk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1579) struct mr_table *mrt;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1580)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1581) if (sk->sk_type != SOCK_RAW ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1582) inet_sk(sk)->inet_num != IPPROTO_ICMPV6)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1583) return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1584)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1585) rtnl_lock();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1586) ip6mr_for_each_table(mrt, net) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1587) if (sk == rtnl_dereference(mrt->mroute_sk)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1588) write_lock_bh(&mrt_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1589) RCU_INIT_POINTER(mrt->mroute_sk, NULL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1590) /* Note that mroute_sk had SOCK_RCU_FREE set,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1591) * so the RCU grace period before sk freeing
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1592) * is guaranteed by sk_destruct()
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1593) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1594) net->ipv6.devconf_all->mc_forwarding--;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1595) write_unlock_bh(&mrt_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1596) inet6_netconf_notify_devconf(net, RTM_NEWNETCONF,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1597) NETCONFA_MC_FORWARDING,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1598) NETCONFA_IFINDEX_ALL,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1599) net->ipv6.devconf_all);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1600)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1601) mroute_clean_tables(mrt, MRT6_FLUSH_MIFS | MRT6_FLUSH_MFC);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1602) err = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1603) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1604) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1605) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1606) rtnl_unlock();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1607)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1608) return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1609) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1610)
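/* Return true if a multicast routing daemon socket is registered in the
 * table that would handle this skb's flow.
 */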
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1611) bool mroute6_is_socket(struct net *net, struct sk_buff *skb)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1612) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1613) struct mr_table *mrt;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1614) struct flowi6 fl6 = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1615) .flowi6_iif = skb->skb_iif ? : LOOPBACK_IFINDEX,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1616) .flowi6_oif = skb->dev->ifindex,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1617) .flowi6_mark = skb->mark,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1618) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1619)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1620) if (ip6mr_fib_lookup(net, &fl6, &mrt) < 0)
		return false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1622)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1623) return rcu_access_pointer(mrt->mroute_sk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1624) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1625) EXPORT_SYMBOL(mroute6_is_socket);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1626)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1627) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1628) * Socket options and virtual interface manipulation. The whole
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1629) * virtual interface system is a complete heap, but unfortunately
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1630) * that's how BSD mrouted happens to think. Maybe one day with a proper
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1631) * MOSPF/PIM router set up we can clean this up.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1632) */
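
/* For illustration only, not taken from this file: a minimal user-space
 * sketch (assuming <sys/socket.h>, <netinet/in.h>, <net/if.h> and
 * <linux/mroute6.h>) of how a daemon such as pim6sd is expected to drive
 * these options -- open an ICMPv6 raw socket, take ownership with
 * MRT6_INIT, then register a MIF. The interface name "eth0" and the
 * error handling are placeholders.
 *
 *	int s = socket(AF_INET6, SOCK_RAW, IPPROTO_ICMPV6);
 *	int one = 1;
 *	struct mif6ctl mc = {
 *		.mif6c_mifi	= 0,
 *		.mif6c_flags	= 0,
 *		.vifc_threshold	= 1,
 *		.mif6c_pifi	= if_nametoindex("eth0"),
 *	};
 *
 *	if (s < 0 ||
 *	    setsockopt(s, IPPROTO_IPV6, MRT6_INIT, &one, sizeof(one)) < 0 ||
 *	    setsockopt(s, IPPROTO_IPV6, MRT6_ADD_MIF, &mc, sizeof(mc)) < 0)
 *		perror("mroute6 setup");
 */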
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1633)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1634) int ip6_mroute_setsockopt(struct sock *sk, int optname, sockptr_t optval,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1635) unsigned int optlen)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1636) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1637) int ret, parent = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1638) struct mif6ctl vif;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1639) struct mf6cctl mfc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1640) mifi_t mifi;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1641) struct net *net = sock_net(sk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1642) struct mr_table *mrt;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1643)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1644) if (sk->sk_type != SOCK_RAW ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1645) inet_sk(sk)->inet_num != IPPROTO_ICMPV6)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1646) return -EOPNOTSUPP;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1647)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1648) mrt = ip6mr_get_table(net, raw6_sk(sk)->ip6mr_table ? : RT6_TABLE_DFLT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1649) if (!mrt)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1650) return -ENOENT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1651)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1652) if (optname != MRT6_INIT) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1653) if (sk != rcu_access_pointer(mrt->mroute_sk) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1654) !ns_capable(net->user_ns, CAP_NET_ADMIN))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1655) return -EACCES;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1656) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1657)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1658) switch (optname) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1659) case MRT6_INIT:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1660) if (optlen < sizeof(int))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1661) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1662)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1663) return ip6mr_sk_init(mrt, sk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1664)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1665) case MRT6_DONE:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1666) return ip6mr_sk_done(sk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1667)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1668) case MRT6_ADD_MIF:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1669) if (optlen < sizeof(vif))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1670) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1671) if (copy_from_sockptr(&vif, optval, sizeof(vif)))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1672) return -EFAULT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1673) if (vif.mif6c_mifi >= MAXMIFS)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1674) return -ENFILE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1675) rtnl_lock();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1676) ret = mif6_add(net, mrt, &vif,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1677) sk == rtnl_dereference(mrt->mroute_sk));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1678) rtnl_unlock();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1679) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1680)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1681) case MRT6_DEL_MIF:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1682) if (optlen < sizeof(mifi_t))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1683) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1684) if (copy_from_sockptr(&mifi, optval, sizeof(mifi_t)))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1685) return -EFAULT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1686) rtnl_lock();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1687) ret = mif6_delete(mrt, mifi, 0, NULL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1688) rtnl_unlock();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1689) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1690)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1691) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1692) * Manipulate the forwarding caches. These live
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1693) * in a sort of kernel/user symbiosis.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1694) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1695) case MRT6_ADD_MFC:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1696) case MRT6_DEL_MFC:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1697) parent = -1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1698) fallthrough;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1699) case MRT6_ADD_MFC_PROXY:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1700) case MRT6_DEL_MFC_PROXY:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1701) if (optlen < sizeof(mfc))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1702) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1703) if (copy_from_sockptr(&mfc, optval, sizeof(mfc)))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1704) return -EFAULT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1705) if (parent == 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1706) parent = mfc.mf6cc_parent;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1707) rtnl_lock();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1708) if (optname == MRT6_DEL_MFC || optname == MRT6_DEL_MFC_PROXY)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1709) ret = ip6mr_mfc_delete(mrt, &mfc, parent);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1710) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1711) ret = ip6mr_mfc_add(net, mrt, &mfc,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1712) sk ==
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1713) rtnl_dereference(mrt->mroute_sk),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1714) parent);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1715) rtnl_unlock();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1716) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1717)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1718) case MRT6_FLUSH:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1719) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1720) int flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1721)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1722) if (optlen != sizeof(flags))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1723) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1724) if (copy_from_sockptr(&flags, optval, sizeof(flags)))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1725) return -EFAULT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1726) rtnl_lock();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1727) mroute_clean_tables(mrt, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1728) rtnl_unlock();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1729) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1730) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1731)
	/*
	 *	Control the PIM assert mode (enabling PIM also enables asserts)
	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1735) case MRT6_ASSERT:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1736) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1737) int v;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1738)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1739) if (optlen != sizeof(v))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1740) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1741) if (copy_from_sockptr(&v, optval, sizeof(v)))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1742) return -EFAULT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1743) mrt->mroute_do_assert = v;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1744) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1745) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1746)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1747) #ifdef CONFIG_IPV6_PIMSM_V2
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1748) case MRT6_PIM:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1749) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1750) int v;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1751)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1752) if (optlen != sizeof(v))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1753) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1754) if (copy_from_sockptr(&v, optval, sizeof(v)))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1755) return -EFAULT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1756) v = !!v;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1757) rtnl_lock();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1758) ret = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1759) if (v != mrt->mroute_do_pim) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1760) mrt->mroute_do_pim = v;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1761) mrt->mroute_do_assert = v;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1762) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1763) rtnl_unlock();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1764) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1765) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1766)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1767) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1768) #ifdef CONFIG_IPV6_MROUTE_MULTIPLE_TABLES
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1769) case MRT6_TABLE:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1770) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1771) u32 v;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1772)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1773) if (optlen != sizeof(u32))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1774) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1775) if (copy_from_sockptr(&v, optval, sizeof(v)))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1776) return -EFAULT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1777) /* "pim6reg%u" should not exceed 16 bytes (IFNAMSIZ) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1778) if (v != RT_TABLE_DEFAULT && v >= 100000000)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1779) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1780) if (sk == rcu_access_pointer(mrt->mroute_sk))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1781) return -EBUSY;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1782)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1783) rtnl_lock();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1784) ret = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1785) mrt = ip6mr_new_table(net, v);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1786) if (IS_ERR(mrt))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1787) ret = PTR_ERR(mrt);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1788) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1789) raw6_sk(sk)->ip6mr_table = v;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1790) rtnl_unlock();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1791) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1792) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1793) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1794) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1795) * Spurious command, or MRT6_VERSION which you cannot
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1796) * set.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1797) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1798) default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1799) return -ENOPROTOOPT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1800) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1801) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1802)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1803) /*
 *	Getsockopt support for the multicast routing system.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1805) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1806)
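/* For example (userspace sketch, not kernel code), the API version can be
 * queried roughly like
 *
 *	int ver;
 *	socklen_t len = sizeof(ver);
 *	getsockopt(s, IPPROTO_IPV6, MRT6_VERSION, &ver, &len);
 *
 * which this implementation reports as 0x0305.
 */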
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1807) int ip6_mroute_getsockopt(struct sock *sk, int optname, char __user *optval,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1808) int __user *optlen)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1809) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1810) int olr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1811) int val;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1812) struct net *net = sock_net(sk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1813) struct mr_table *mrt;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1814)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1815) if (sk->sk_type != SOCK_RAW ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1816) inet_sk(sk)->inet_num != IPPROTO_ICMPV6)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1817) return -EOPNOTSUPP;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1818)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1819) mrt = ip6mr_get_table(net, raw6_sk(sk)->ip6mr_table ? : RT6_TABLE_DFLT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1820) if (!mrt)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1821) return -ENOENT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1822)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1823) switch (optname) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1824) case MRT6_VERSION:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1825) val = 0x0305;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1826) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1827) #ifdef CONFIG_IPV6_PIMSM_V2
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1828) case MRT6_PIM:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1829) val = mrt->mroute_do_pim;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1830) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1831) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1832) case MRT6_ASSERT:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1833) val = mrt->mroute_do_assert;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1834) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1835) default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1836) return -ENOPROTOOPT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1837) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1838)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1839) if (get_user(olr, optlen))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1840) return -EFAULT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1841)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1842) olr = min_t(int, olr, sizeof(int));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1843) if (olr < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1844) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1845)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1846) if (put_user(olr, optlen))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1847) return -EFAULT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1848) if (copy_to_user(optval, &val, olr))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1849) return -EFAULT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1850) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1851) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1852)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1853) /*
 *	The IPv6 multicast ioctl support routines.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1855) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1856)
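/* Userspace sketch (not kernel code): per-flow counters can be read roughly
 * like
 *
 *	struct sioc_sg_req6 sr = { .src = src_sa, .grp = grp_sa };
 *	ioctl(s, SIOCGETSGCNT_IN6, &sr);
 *
 * where src_sa/grp_sa are sockaddr_in6 values filled in by the caller;
 * afterwards sr.pktcnt, sr.bytecnt and sr.wrong_if hold the statistics for
 * that (S,G) entry.
 */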
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1857) int ip6mr_ioctl(struct sock *sk, int cmd, void __user *arg)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1858) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1859) struct sioc_sg_req6 sr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1860) struct sioc_mif_req6 vr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1861) struct vif_device *vif;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1862) struct mfc6_cache *c;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1863) struct net *net = sock_net(sk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1864) struct mr_table *mrt;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1865)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1866) mrt = ip6mr_get_table(net, raw6_sk(sk)->ip6mr_table ? : RT6_TABLE_DFLT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1867) if (!mrt)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1868) return -ENOENT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1869)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1870) switch (cmd) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1871) case SIOCGETMIFCNT_IN6:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1872) if (copy_from_user(&vr, arg, sizeof(vr)))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1873) return -EFAULT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1874) if (vr.mifi >= mrt->maxvif)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1875) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1876) vr.mifi = array_index_nospec(vr.mifi, mrt->maxvif);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1877) read_lock(&mrt_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1878) vif = &mrt->vif_table[vr.mifi];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1879) if (VIF_EXISTS(mrt, vr.mifi)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1880) vr.icount = vif->pkt_in;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1881) vr.ocount = vif->pkt_out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1882) vr.ibytes = vif->bytes_in;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1883) vr.obytes = vif->bytes_out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1884) read_unlock(&mrt_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1885)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1886) if (copy_to_user(arg, &vr, sizeof(vr)))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1887) return -EFAULT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1888) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1889) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1890) read_unlock(&mrt_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1891) return -EADDRNOTAVAIL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1892) case SIOCGETSGCNT_IN6:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1893) if (copy_from_user(&sr, arg, sizeof(sr)))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1894) return -EFAULT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1895)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1896) rcu_read_lock();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1897) c = ip6mr_cache_find(mrt, &sr.src.sin6_addr, &sr.grp.sin6_addr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1898) if (c) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1899) sr.pktcnt = c->_c.mfc_un.res.pkt;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1900) sr.bytecnt = c->_c.mfc_un.res.bytes;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1901) sr.wrong_if = c->_c.mfc_un.res.wrong_if;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1902) rcu_read_unlock();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1903)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1904) if (copy_to_user(arg, &sr, sizeof(sr)))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1905) return -EFAULT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1906) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1907) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1908) rcu_read_unlock();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1909) return -EADDRNOTAVAIL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1910) default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1911) return -ENOIOCTLCMD;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1912) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1913) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1914)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1915) #ifdef CONFIG_COMPAT
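/* A 32-bit process on a 64-bit kernel sees the counter fields below as
 * 32-bit longs, while the native sioc_sg_req6/sioc_mif_req6 use
 * unsigned long; hence these compat mirrors.
 */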
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1916) struct compat_sioc_sg_req6 {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1917) struct sockaddr_in6 src;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1918) struct sockaddr_in6 grp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1919) compat_ulong_t pktcnt;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1920) compat_ulong_t bytecnt;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1921) compat_ulong_t wrong_if;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1922) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1923)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1924) struct compat_sioc_mif_req6 {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1925) mifi_t mifi;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1926) compat_ulong_t icount;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1927) compat_ulong_t ocount;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1928) compat_ulong_t ibytes;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1929) compat_ulong_t obytes;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1930) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1931)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1932) int ip6mr_compat_ioctl(struct sock *sk, unsigned int cmd, void __user *arg)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1933) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1934) struct compat_sioc_sg_req6 sr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1935) struct compat_sioc_mif_req6 vr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1936) struct vif_device *vif;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1937) struct mfc6_cache *c;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1938) struct net *net = sock_net(sk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1939) struct mr_table *mrt;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1940)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1941) mrt = ip6mr_get_table(net, raw6_sk(sk)->ip6mr_table ? : RT6_TABLE_DFLT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1942) if (!mrt)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1943) return -ENOENT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1944)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1945) switch (cmd) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1946) case SIOCGETMIFCNT_IN6:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1947) if (copy_from_user(&vr, arg, sizeof(vr)))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1948) return -EFAULT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1949) if (vr.mifi >= mrt->maxvif)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1950) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1951) vr.mifi = array_index_nospec(vr.mifi, mrt->maxvif);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1952) read_lock(&mrt_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1953) vif = &mrt->vif_table[vr.mifi];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1954) if (VIF_EXISTS(mrt, vr.mifi)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1955) vr.icount = vif->pkt_in;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1956) vr.ocount = vif->pkt_out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1957) vr.ibytes = vif->bytes_in;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1958) vr.obytes = vif->bytes_out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1959) read_unlock(&mrt_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1960)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1961) if (copy_to_user(arg, &vr, sizeof(vr)))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1962) return -EFAULT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1963) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1964) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1965) read_unlock(&mrt_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1966) return -EADDRNOTAVAIL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1967) case SIOCGETSGCNT_IN6:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1968) if (copy_from_user(&sr, arg, sizeof(sr)))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1969) return -EFAULT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1970)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1971) rcu_read_lock();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1972) c = ip6mr_cache_find(mrt, &sr.src.sin6_addr, &sr.grp.sin6_addr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1973) if (c) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1974) sr.pktcnt = c->_c.mfc_un.res.pkt;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1975) sr.bytecnt = c->_c.mfc_un.res.bytes;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1976) sr.wrong_if = c->_c.mfc_un.res.wrong_if;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1977) rcu_read_unlock();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1978)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1979) if (copy_to_user(arg, &sr, sizeof(sr)))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1980) return -EFAULT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1981) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1982) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1983) rcu_read_unlock();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1984) return -EADDRNOTAVAIL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1985) default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1986) return -ENOIOCTLCMD;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1987) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1988) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1989) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1990)
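/* Invoked as the okfn of the NF_INET_FORWARD hook in ip6mr_forward2():
 * account the forwarded packet and hand it to dst_output().
 */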
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1991) static inline int ip6mr_forward2_finish(struct net *net, struct sock *sk, struct sk_buff *skb)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1992) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1993) IP6_INC_STATS(net, ip6_dst_idev(skb_dst(skb)),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1994) IPSTATS_MIB_OUTFORWDATAGRAMS);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1995) IP6_ADD_STATS(net, ip6_dst_idev(skb_dst(skb)),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1996) IPSTATS_MIB_OUTOCTETS, skb->len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1997) return dst_output(net, sk, skb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1998) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1999)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2000) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2001) * Processing handlers for ip6mr_forward
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2002) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2003)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2004) static int ip6mr_forward2(struct net *net, struct mr_table *mrt,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2005) struct sk_buff *skb, int vifi)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2006) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2007) struct ipv6hdr *ipv6h;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2008) struct vif_device *vif = &mrt->vif_table[vifi];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2009) struct net_device *dev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2010) struct dst_entry *dst;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2011) struct flowi6 fl6;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2012)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2013) if (!vif->dev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2014) goto out_free;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2015)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2016) #ifdef CONFIG_IPV6_PIMSM_V2
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2017) if (vif->flags & MIFF_REGISTER) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2018) vif->pkt_out++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2019) vif->bytes_out += skb->len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2020) vif->dev->stats.tx_bytes += skb->len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2021) vif->dev->stats.tx_packets++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2022) ip6mr_cache_report(mrt, skb, vifi, MRT6MSG_WHOLEPKT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2023) goto out_free;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2024) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2025) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2026)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2027) ipv6h = ipv6_hdr(skb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2028)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2029) fl6 = (struct flowi6) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2030) .flowi6_oif = vif->link,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2031) .daddr = ipv6h->daddr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2032) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2033)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2034) dst = ip6_route_output(net, NULL, &fl6);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2035) if (dst->error) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2036) dst_release(dst);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2037) goto out_free;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2038) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2039)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2040) skb_dst_drop(skb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2041) skb_dst_set(skb, dst);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2042)
	/*
	 * RFC 1584 teaches that a DVMRP/PIM router must deliver packets
	 * locally not only before forwarding, but also after forwarding on
	 * all output interfaces. Clearly, if the mrouter runs a multicasting
	 * program, it should receive packets regardless of the interface on
	 * which the program joined.
	 * If we did not do this, the program would have to join on all
	 * interfaces. On the other hand, a multihomed host (or router, but
	 * not mrouter) cannot join on more than one interface - that would
	 * result in receiving multiple copies of each packet.
	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2054) dev = vif->dev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2055) skb->dev = dev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2056) vif->pkt_out++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2057) vif->bytes_out += skb->len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2058)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2059) /* We are about to write */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2060) /* XXX: extension headers? */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2061) if (skb_cow(skb, sizeof(*ipv6h) + LL_RESERVED_SPACE(dev)))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2062) goto out_free;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2063)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2064) ipv6h = ipv6_hdr(skb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2065) ipv6h->hop_limit--;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2066)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2067) IP6CB(skb)->flags |= IP6SKB_FORWARDED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2068)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2069) return NF_HOOK(NFPROTO_IPV6, NF_INET_FORWARD,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2070) net, NULL, skb, skb->dev, dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2071) ip6mr_forward2_finish);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2072)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2073) out_free:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2074) kfree_skb(skb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2075) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2076) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2077)
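/* Map a net_device to its mif index in this table, or -1 if the device is
 * not configured as a mif.
 */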
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2078) static int ip6mr_find_vif(struct mr_table *mrt, struct net_device *dev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2079) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2080) int ct;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2081)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2082) for (ct = mrt->maxvif - 1; ct >= 0; ct--) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2083) if (mrt->vif_table[ct].dev == dev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2084) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2085) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2086) return ct;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2087) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2088)
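/* Replicate one packet onto every output interface selected by the cache
 * entry: clones are sent for all but the last matching mif, and the original
 * skb is consumed by the last ip6mr_forward2() call or freed.
 */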
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2089) static void ip6_mr_forward(struct net *net, struct mr_table *mrt,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2090) struct net_device *dev, struct sk_buff *skb,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2091) struct mfc6_cache *c)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2092) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2093) int psend = -1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2094) int vif, ct;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2095) int true_vifi = ip6mr_find_vif(mrt, dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2096)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2097) vif = c->_c.mfc_parent;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2098) c->_c.mfc_un.res.pkt++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2099) c->_c.mfc_un.res.bytes += skb->len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2100) c->_c.mfc_un.res.lastuse = jiffies;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2101)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2102) if (ipv6_addr_any(&c->mf6c_origin) && true_vifi >= 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2103) struct mfc6_cache *cache_proxy;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2104)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2105) /* For an (*,G) entry, we only check that the incoming
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2106) * interface is part of the static tree.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2107) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2108) rcu_read_lock();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2109) cache_proxy = mr_mfc_find_any_parent(mrt, vif);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2110) if (cache_proxy &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2111) cache_proxy->_c.mfc_un.res.ttls[true_vifi] < 255) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2112) rcu_read_unlock();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2113) goto forward;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2114) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2115) rcu_read_unlock();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2116) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2117)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2118) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2119) * Wrong interface: drop packet and (maybe) send PIM assert.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2120) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2121) if (mrt->vif_table[vif].dev != dev) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2122) c->_c.mfc_un.res.wrong_if++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2123)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2124) if (true_vifi >= 0 && mrt->mroute_do_assert &&
		    /* PIM-SM uses asserts when switching from RPT to SPT, so
		       we cannot check that the packet arrived on an oif. That
		       is unfortunate, but otherwise we would need to move a
		       pretty large chunk of pimd into the kernel. Ough... --ANK
		     */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2130) (mrt->mroute_do_pim ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2131) c->_c.mfc_un.res.ttls[true_vifi] < 255) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2132) time_after(jiffies,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2133) c->_c.mfc_un.res.last_assert +
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2134) MFC_ASSERT_THRESH)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2135) c->_c.mfc_un.res.last_assert = jiffies;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2136) ip6mr_cache_report(mrt, skb, true_vifi, MRT6MSG_WRONGMIF);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2137) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2138) goto dont_forward;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2139) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2140)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2141) forward:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2142) mrt->vif_table[vif].pkt_in++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2143) mrt->vif_table[vif].bytes_in += skb->len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2144)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2145) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2146) * Forward the frame
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2147) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2148) if (ipv6_addr_any(&c->mf6c_origin) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2149) ipv6_addr_any(&c->mf6c_mcastgrp)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2150) if (true_vifi >= 0 &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2151) true_vifi != c->_c.mfc_parent &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2152) ipv6_hdr(skb)->hop_limit >
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2153) c->_c.mfc_un.res.ttls[c->_c.mfc_parent]) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2154) /* It's an (*,*) entry and the packet is not coming from
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2155) * the upstream: forward the packet to the upstream
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2156) * only.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2157) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2158) psend = c->_c.mfc_parent;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2159) goto last_forward;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2160) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2161) goto dont_forward;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2162) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2163) for (ct = c->_c.mfc_un.res.maxvif - 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2164) ct >= c->_c.mfc_un.res.minvif; ct--) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2165) /* For (*,G) entry, don't forward to the incoming interface */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2166) if ((!ipv6_addr_any(&c->mf6c_origin) || ct != true_vifi) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2167) ipv6_hdr(skb)->hop_limit > c->_c.mfc_un.res.ttls[ct]) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2168) if (psend != -1) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2169) struct sk_buff *skb2 = skb_clone(skb, GFP_ATOMIC);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2170) if (skb2)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2171) ip6mr_forward2(net, mrt, skb2, psend);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2172) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2173) psend = ct;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2174) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2175) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2176) last_forward:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2177) if (psend != -1) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2178) ip6mr_forward2(net, mrt, skb, psend);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2179) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2180) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2181)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2182) dont_forward:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2183) kfree_skb(skb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2184) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2185)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2186)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2187) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2188) * Multicast packets for forwarding arrive here
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2189) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2190)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2191) int ip6_mr_input(struct sk_buff *skb)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2192) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2193) struct mfc6_cache *cache;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2194) struct net *net = dev_net(skb->dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2195) struct mr_table *mrt;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2196) struct flowi6 fl6 = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2197) .flowi6_iif = skb->dev->ifindex,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2198) .flowi6_mark = skb->mark,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2199) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2200) int err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2201) struct net_device *dev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2202)
	/* The skb->dev passed in is the master device for VRFs.
	 * Get the proper interface, i.e. the one that actually has a vif
	 * associated with it.
	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2206) dev = skb->dev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2207) if (netif_is_l3_master(skb->dev)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2208) dev = dev_get_by_index_rcu(net, IPCB(skb)->iif);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2209) if (!dev) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2210) kfree_skb(skb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2211) return -ENODEV;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2212) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2213) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2214)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2215) err = ip6mr_fib_lookup(net, &fl6, &mrt);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2216) if (err < 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2217) kfree_skb(skb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2218) return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2219) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2220)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2221) read_lock(&mrt_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2222) cache = ip6mr_cache_find(mrt,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2223) &ipv6_hdr(skb)->saddr, &ipv6_hdr(skb)->daddr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2224) if (!cache) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2225) int vif = ip6mr_find_vif(mrt, dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2226)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2227) if (vif >= 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2228) cache = ip6mr_cache_find_any(mrt,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2229) &ipv6_hdr(skb)->daddr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2230) vif);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2231) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2232)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2233) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2234) * No usable cache entry
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2235) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2236) if (!cache) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2237) int vif;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2238)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2239) vif = ip6mr_find_vif(mrt, dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2240) if (vif >= 0) {
			err = ip6mr_cache_unresolved(mrt, vif, skb, dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2242) read_unlock(&mrt_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2243)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2244) return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2245) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2246) read_unlock(&mrt_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2247) kfree_skb(skb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2248) return -ENODEV;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2249) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2250)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2251) ip6_mr_forward(net, mrt, dev, skb, cache);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2252)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2253) read_unlock(&mrt_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2254)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2255) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2256) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2257)
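/* Describe a multicast route for an RTM_GETROUTE reply. If no cache entry
 * exists yet, a minimal dummy packet is queued as unresolved so the
 * userspace daemon is asked to install one.
 */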
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2258) int ip6mr_get_route(struct net *net, struct sk_buff *skb, struct rtmsg *rtm,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2259) u32 portid)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2260) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2261) int err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2262) struct mr_table *mrt;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2263) struct mfc6_cache *cache;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2264) struct rt6_info *rt = (struct rt6_info *)skb_dst(skb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2265)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2266) mrt = ip6mr_get_table(net, RT6_TABLE_DFLT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2267) if (!mrt)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2268) return -ENOENT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2269)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2270) read_lock(&mrt_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2271) cache = ip6mr_cache_find(mrt, &rt->rt6i_src.addr, &rt->rt6i_dst.addr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2272) if (!cache && skb->dev) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2273) int vif = ip6mr_find_vif(mrt, skb->dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2274)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2275) if (vif >= 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2276) cache = ip6mr_cache_find_any(mrt, &rt->rt6i_dst.addr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2277) vif);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2278) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2279)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2280) if (!cache) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2281) struct sk_buff *skb2;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2282) struct ipv6hdr *iph;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2283) struct net_device *dev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2284) int vif;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2285)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2286) dev = skb->dev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2287) if (!dev || (vif = ip6mr_find_vif(mrt, dev)) < 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2288) read_unlock(&mrt_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2289) return -ENODEV;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2290) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2291)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2292) /* really correct? */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2293) skb2 = alloc_skb(sizeof(struct ipv6hdr), GFP_ATOMIC);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2294) if (!skb2) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2295) read_unlock(&mrt_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2296) return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2297) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2298)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2299) NETLINK_CB(skb2).portid = portid;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2300) skb_reset_transport_header(skb2);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2301)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2302) skb_put(skb2, sizeof(struct ipv6hdr));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2303) skb_reset_network_header(skb2);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2304)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2305) iph = ipv6_hdr(skb2);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2306) iph->version = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2307) iph->priority = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2308) iph->flow_lbl[0] = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2309) iph->flow_lbl[1] = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2310) iph->flow_lbl[2] = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2311) iph->payload_len = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2312) iph->nexthdr = IPPROTO_NONE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2313) iph->hop_limit = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2314) iph->saddr = rt->rt6i_src.addr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2315) iph->daddr = rt->rt6i_dst.addr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2316)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2317) err = ip6mr_cache_unresolved(mrt, vif, skb2, dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2318) read_unlock(&mrt_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2319)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2320) return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2321) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2322)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2323) err = mr_fill_mroute(mrt, skb, &cache->_c, rtm);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2324) read_unlock(&mrt_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2325) return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2326) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2327)
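/* Build one RTM_NEWROUTE/RTM_DELROUTE message for an MFC entry; the oif
 * list and statistics are appended by mr_fill_mroute().
 */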
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2328) static int ip6mr_fill_mroute(struct mr_table *mrt, struct sk_buff *skb,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2329) u32 portid, u32 seq, struct mfc6_cache *c, int cmd,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2330) int flags)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2331) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2332) struct nlmsghdr *nlh;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2333) struct rtmsg *rtm;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2334) int err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2335)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2336) nlh = nlmsg_put(skb, portid, seq, cmd, sizeof(*rtm), flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2337) if (!nlh)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2338) return -EMSGSIZE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2339)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2340) rtm = nlmsg_data(nlh);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2341) rtm->rtm_family = RTNL_FAMILY_IP6MR;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2342) rtm->rtm_dst_len = 128;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2343) rtm->rtm_src_len = 128;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2344) rtm->rtm_tos = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2345) rtm->rtm_table = mrt->id;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2346) if (nla_put_u32(skb, RTA_TABLE, mrt->id))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2347) goto nla_put_failure;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2348) rtm->rtm_type = RTN_MULTICAST;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2349) rtm->rtm_scope = RT_SCOPE_UNIVERSE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2350) if (c->_c.mfc_flags & MFC_STATIC)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2351) rtm->rtm_protocol = RTPROT_STATIC;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2352) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2353) rtm->rtm_protocol = RTPROT_MROUTED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2354) rtm->rtm_flags = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2355)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2356) if (nla_put_in6_addr(skb, RTA_SRC, &c->mf6c_origin) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2357) nla_put_in6_addr(skb, RTA_DST, &c->mf6c_mcastgrp))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2358) goto nla_put_failure;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2359) err = mr_fill_mroute(mrt, skb, &c->_c, rtm);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2360) /* do not break the dump if cache is unresolved */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2361) if (err < 0 && err != -ENOENT)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2362) goto nla_put_failure;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2363)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2364) nlmsg_end(skb, nlh);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2365) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2366)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2367) nla_put_failure:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2368) nlmsg_cancel(skb, nlh);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2369) return -EMSGSIZE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2370) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2371)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2372) static int _ip6mr_fill_mroute(struct mr_table *mrt, struct sk_buff *skb,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2373) u32 portid, u32 seq, struct mr_mfc *c,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2374) int cmd, int flags)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2375) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2376) return ip6mr_fill_mroute(mrt, skb, portid, seq, (struct mfc6_cache *)c,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2377) cmd, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2378) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2379)
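/* Upper bound on the netlink message size for one MFC entry; unresolved
 * entries carry neither an oif list nor statistics.
 */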
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2380) static int mr6_msgsize(bool unresolved, int maxvif)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2381) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2382) size_t len =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2383) NLMSG_ALIGN(sizeof(struct rtmsg))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2384) + nla_total_size(4) /* RTA_TABLE */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2385) + nla_total_size(sizeof(struct in6_addr)) /* RTA_SRC */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2386) + nla_total_size(sizeof(struct in6_addr)) /* RTA_DST */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2387) ;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2388)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2389) if (!unresolved)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2390) len = len
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2391) + nla_total_size(4) /* RTA_IIF */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2392) + nla_total_size(0) /* RTA_MULTIPATH */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2393) + maxvif * NLA_ALIGN(sizeof(struct rtnexthop))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2394) /* RTA_MFC_STATS */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2395) + nla_total_size_64bit(sizeof(struct rta_mfc_stats))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2396) ;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2397)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2398) return len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2399) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2400)
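/* Broadcast an MFC entry change (@cmd is RTM_NEWROUTE or RTM_DELROUTE) to
 * RTNLGRP_IPV6_MROUTE listeners.
 */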
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2401) static void mr6_netlink_event(struct mr_table *mrt, struct mfc6_cache *mfc,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2402) int cmd)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2403) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2404) struct net *net = read_pnet(&mrt->net);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2405) struct sk_buff *skb;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2406) int err = -ENOBUFS;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2407)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2408) skb = nlmsg_new(mr6_msgsize(mfc->_c.mfc_parent >= MAXMIFS, mrt->maxvif),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2409) GFP_ATOMIC);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2410) if (!skb)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2411) goto errout;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2412)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2413) err = ip6mr_fill_mroute(mrt, skb, 0, 0, mfc, cmd, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2414) if (err < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2415) goto errout;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2416)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2417) rtnl_notify(skb, net, 0, RTNLGRP_IPV6_MROUTE, NULL, GFP_ATOMIC);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2418) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2419)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2420) errout:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2421) kfree_skb(skb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2422) if (err < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2423) rtnl_set_sk_err(net, RTNLGRP_IPV6_MROUTE, err);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2424) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2425)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2426) static size_t mrt6msg_netlink_msgsize(size_t payloadlen)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2427) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2428) size_t len =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2429) NLMSG_ALIGN(sizeof(struct rtgenmsg))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2430) + nla_total_size(1) /* IP6MRA_CREPORT_MSGTYPE */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2431) + nla_total_size(4) /* IP6MRA_CREPORT_MIF_ID */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2432) /* IP6MRA_CREPORT_SRC_ADDR */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2433) + nla_total_size(sizeof(struct in6_addr))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2434) /* IP6MRA_CREPORT_DST_ADDR */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2435) + nla_total_size(sizeof(struct in6_addr))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2436) /* IP6MRA_CREPORT_PKT */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2437) + nla_total_size(payloadlen)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2438) ;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2439)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2440) return len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2441) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2442)
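/* Mirror an mrt6msg cache report (e.g. MRT6MSG_NOCACHE) to netlink, so that
 * daemons listening on RTNLGRP_IPV6_MROUTE_R receive it as well as the
 * mroute socket.
 */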
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2443) static void mrt6msg_netlink_event(struct mr_table *mrt, struct sk_buff *pkt)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2444) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2445) struct net *net = read_pnet(&mrt->net);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2446) struct nlmsghdr *nlh;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2447) struct rtgenmsg *rtgenm;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2448) struct mrt6msg *msg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2449) struct sk_buff *skb;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2450) struct nlattr *nla;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2451) int payloadlen;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2452)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2453) payloadlen = pkt->len - sizeof(struct mrt6msg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2454) msg = (struct mrt6msg *)skb_transport_header(pkt);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2455)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2456) skb = nlmsg_new(mrt6msg_netlink_msgsize(payloadlen), GFP_ATOMIC);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2457) if (!skb)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2458) goto errout;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2459)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2460) nlh = nlmsg_put(skb, 0, 0, RTM_NEWCACHEREPORT,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2461) sizeof(struct rtgenmsg), 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2462) if (!nlh)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2463) goto errout;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2464) rtgenm = nlmsg_data(nlh);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2465) rtgenm->rtgen_family = RTNL_FAMILY_IP6MR;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2466) if (nla_put_u8(skb, IP6MRA_CREPORT_MSGTYPE, msg->im6_msgtype) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2467) nla_put_u32(skb, IP6MRA_CREPORT_MIF_ID, msg->im6_mif) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2468) nla_put_in6_addr(skb, IP6MRA_CREPORT_SRC_ADDR,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2469) &msg->im6_src) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2470) nla_put_in6_addr(skb, IP6MRA_CREPORT_DST_ADDR,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2471) &msg->im6_dst))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2472) goto nla_put_failure;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2473)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2474) nla = nla_reserve(skb, IP6MRA_CREPORT_PKT, payloadlen);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2475) if (!nla || skb_copy_bits(pkt, sizeof(struct mrt6msg),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2476) nla_data(nla), payloadlen))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2477) goto nla_put_failure;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2478)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2479) nlmsg_end(skb, nlh);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2480)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2481) rtnl_notify(skb, net, 0, RTNLGRP_IPV6_MROUTE_R, NULL, GFP_ATOMIC);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2482) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2483)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2484) nla_put_failure:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2485) nlmsg_cancel(skb, nlh);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2486) errout:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2487) kfree_skb(skb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2488) rtnl_set_sk_err(net, RTNLGRP_IPV6_MROUTE_R, -ENOBUFS);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2489) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2490)
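/* RTM_GETROUTE dump handler for RTNL_FAMILY_IP6MR: dump a single table when
 * the request carries RTA_TABLE, otherwise walk all tables.
 */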
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2491) static int ip6mr_rtm_dumproute(struct sk_buff *skb, struct netlink_callback *cb)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2492) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2493) const struct nlmsghdr *nlh = cb->nlh;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2494) struct fib_dump_filter filter = {};
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2495) int err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2496)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2497) if (cb->strict_check) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2498) err = ip_valid_fib_dump_req(sock_net(skb->sk), nlh,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2499) &filter, cb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2500) if (err < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2501) return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2502) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2503)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2504) if (filter.table_id) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2505) struct mr_table *mrt;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2506)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2507) mrt = ip6mr_get_table(sock_net(skb->sk), filter.table_id);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2508) if (!mrt) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2509) if (rtnl_msg_family(cb->nlh) != RTNL_FAMILY_IP6MR)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2510) return skb->len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2511)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2512) NL_SET_ERR_MSG_MOD(cb->extack, "MR table does not exist");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2513) return -ENOENT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2514) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2515) err = mr_table_dump(mrt, skb, cb, _ip6mr_fill_mroute,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2516) &mfc_unres_lock, &filter);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2517) return skb->len ? : err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2518) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2519)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2520) return mr_rtm_dumproute(skb, cb, ip6mr_mr_table_iter,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2521) _ip6mr_fill_mroute, &mfc_unres_lock, &filter);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2522) }