// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * net/l3mdev/l3mdev.c - L3 master device implementation
 * Copyright (c) 2015 Cumulus Networks
 * Copyright (c) 2015 David Ahern <dsa@cumulusnetworks.com>
 */

#include <linux/netdevice.h>
#include <net/fib_rules.h>
#include <net/l3mdev.h>

static DEFINE_SPINLOCK(l3mdev_lock);

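/* Per-type table of registered device-lookup handlers. An L3 master
 * driver (for example the VRF driver, using L3MDEV_TYPE_VRF) registers
 * a callback that maps a FIB table id to the ifindex of its device.
 */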
struct l3mdev_handler {
	lookup_by_table_id_t dev_lookup;
};

static struct l3mdev_handler l3mdev_handlers[L3MDEV_TYPE_MAX + 1];

static int l3mdev_check_type(enum l3mdev_type l3type)
{
	if (l3type <= L3MDEV_TYPE_UNSPEC || l3type > L3MDEV_TYPE_MAX)
		return -EINVAL;

	return 0;
}

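/**
 * l3mdev_table_lookup_register - register a table id to ifindex lookup
 *                                helper for an L3 master device type
 * @l3type: L3 master device type (e.g. L3MDEV_TYPE_VRF)
 * @fn: lookup function mapping a FIB table id to a device index
 *
 * Returns 0 on success, -EINVAL for an invalid type and -EBUSY if a
 * handler is already registered for @l3type.
 */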
int l3mdev_table_lookup_register(enum l3mdev_type l3type,
				 lookup_by_table_id_t fn)
{
	struct l3mdev_handler *hdlr;
	int res;

	res = l3mdev_check_type(l3type);
	if (res)
		return res;

	hdlr = &l3mdev_handlers[l3type];

	spin_lock(&l3mdev_lock);

	if (hdlr->dev_lookup) {
		res = -EBUSY;
		goto unlock;
	}

	hdlr->dev_lookup = fn;
	res = 0;

unlock:
	spin_unlock(&l3mdev_lock);

	return res;
}
EXPORT_SYMBOL_GPL(l3mdev_table_lookup_register);

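/**
 * l3mdev_table_lookup_unregister - remove a previously registered
 *                                  table id to ifindex lookup helper
 * @l3type: L3 master device type
 * @fn: lookup function passed to l3mdev_table_lookup_register()
 *
 * The handler is only cleared if @fn matches the currently registered
 * function for @l3type.
 */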
void l3mdev_table_lookup_unregister(enum l3mdev_type l3type,
				    lookup_by_table_id_t fn)
{
	struct l3mdev_handler *hdlr;

	if (l3mdev_check_type(l3type))
		return;

	hdlr = &l3mdev_handlers[l3type];

	spin_lock(&l3mdev_lock);

	if (hdlr->dev_lookup == fn)
		hdlr->dev_lookup = NULL;

	spin_unlock(&l3mdev_lock);
}
EXPORT_SYMBOL_GPL(l3mdev_table_lookup_unregister);

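/**
 * l3mdev_ifindex_lookup_by_table_id - look up the L3 master device
 *                                     bound to a FIB table id
 * @l3type: L3 master device type
 * @net: network namespace for the lookup
 * @table_id: FIB table id
 *
 * Returns the device index on success, -EINVAL if @l3type is invalid
 * or no handler is registered for it.
 */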
int l3mdev_ifindex_lookup_by_table_id(enum l3mdev_type l3type,
				      struct net *net, u32 table_id)
{
	lookup_by_table_id_t lookup;
	struct l3mdev_handler *hdlr;
	int ifindex = -EINVAL;
	int res;

	res = l3mdev_check_type(l3type);
	if (res)
		return res;

	hdlr = &l3mdev_handlers[l3type];

	spin_lock(&l3mdev_lock);

	lookup = hdlr->dev_lookup;
	if (!lookup)
		goto unlock;

	ifindex = lookup(net, table_id);

unlock:
	spin_unlock(&l3mdev_lock);

	return ifindex;
}
EXPORT_SYMBOL_GPL(l3mdev_ifindex_lookup_by_table_id);
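
/* Illustrative sketch of how an L3 master driver would typically use
 * the registration helpers above. The callback name
 * my_ifindex_by_table() is hypothetical; the VRF driver registers its
 * own implementation for L3MDEV_TYPE_VRF.
 *
 *	static int my_ifindex_by_table(struct net *net, u32 table_id)
 *	{
 *		// return ifindex of the device bound to table_id,
 *		// or a negative errno
 *	}
 *
 *	err = l3mdev_table_lookup_register(L3MDEV_TYPE_VRF,
 *					   my_ifindex_by_table);
 *	...
 *	ifindex = l3mdev_ifindex_lookup_by_table_id(L3MDEV_TYPE_VRF, net,
 *						    table_id);
 *	...
 *	l3mdev_table_lookup_unregister(L3MDEV_TYPE_VRF, my_ifindex_by_table);
 */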

/**
 * l3mdev_master_ifindex_rcu - get index of L3 master device
 * @dev: targeted interface
 */
int l3mdev_master_ifindex_rcu(const struct net_device *dev)
{
	int ifindex = 0;

	if (!dev)
		return 0;

	if (netif_is_l3_master(dev)) {
		ifindex = dev->ifindex;
	} else if (netif_is_l3_slave(dev)) {
		struct net_device *master;
		struct net_device *_dev = (struct net_device *)dev;

		/* netdev_master_upper_dev_get_rcu calls
		 * list_first_or_null_rcu to walk the upper dev list.
		 * list_first_or_null_rcu does not handle a const arg. We aren't
		 * making changes, just want the master device from that list so
		 * typecast to remove the const
		 */
		master = netdev_master_upper_dev_get_rcu(_dev);
		if (master)
			ifindex = master->ifindex;
	}

	return ifindex;
}
EXPORT_SYMBOL_GPL(l3mdev_master_ifindex_rcu);
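
/* Typical use (illustrative): callers either already run inside an RCU
 * read-side critical section, or use the l3mdev_master_ifindex() wrapper
 * from <net/l3mdev.h>, which takes rcu_read_lock() around this helper:
 *
 *	rcu_read_lock();
 *	ifindex = l3mdev_master_ifindex_rcu(skb->dev);
 *	rcu_read_unlock();
 */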

/**
 * l3mdev_master_upper_ifindex_by_index_rcu - get index of upper l3 master
 *                                            device
 * @net: network namespace for device index lookup
 * @ifindex: targeted interface
 */
int l3mdev_master_upper_ifindex_by_index_rcu(struct net *net, int ifindex)
{
	struct net_device *dev;

	dev = dev_get_by_index_rcu(net, ifindex);
	while (dev && !netif_is_l3_master(dev))
		dev = netdev_master_upper_dev_get_rcu(dev);

	return dev ? dev->ifindex : 0;
}
EXPORT_SYMBOL_GPL(l3mdev_master_upper_ifindex_by_index_rcu);

/**
 * l3mdev_fib_table_rcu - get FIB table id associated with an L3
 *                        master interface
 * @dev: targeted interface
 */
u32 l3mdev_fib_table_rcu(const struct net_device *dev)
{
	u32 tb_id = 0;

	if (!dev)
		return 0;

	if (netif_is_l3_master(dev)) {
		if (dev->l3mdev_ops->l3mdev_fib_table)
			tb_id = dev->l3mdev_ops->l3mdev_fib_table(dev);
	} else if (netif_is_l3_slave(dev)) {
		/* Users of netdev_master_upper_dev_get_rcu need non-const,
		 * but current inet_*type functions take a const
		 */
		struct net_device *_dev = (struct net_device *)dev;
		const struct net_device *master;

		master = netdev_master_upper_dev_get_rcu(_dev);
		if (master &&
		    master->l3mdev_ops->l3mdev_fib_table)
			tb_id = master->l3mdev_ops->l3mdev_fib_table(master);
	}

	return tb_id;
}
EXPORT_SYMBOL_GPL(l3mdev_fib_table_rcu);

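/**
 * l3mdev_fib_table_by_index - get FIB table id associated with an L3
 *                             master interface by device index
 * @net: network namespace for device index lookup
 * @ifindex: targeted interface
 */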
u32 l3mdev_fib_table_by_index(struct net *net, int ifindex)
{
	struct net_device *dev;
	u32 tb_id = 0;

	if (!ifindex)
		return 0;

	rcu_read_lock();

	dev = dev_get_by_index_rcu(net, ifindex);
	if (dev)
		tb_id = l3mdev_fib_table_rcu(dev);

	rcu_read_unlock();

	return tb_id;
}
EXPORT_SYMBOL_GPL(l3mdev_fib_table_by_index);

/**
 * l3mdev_link_scope_lookup - IPv6 route lookup based on flow for link
 *                            local and multicast addresses
 * @net: network namespace for device index lookup
 * @fl6: IPv6 flow struct for lookup
 *
 * This function does not hold refcnt on the returned dst.
 * Caller must hold rcu_read_lock().
 */
struct dst_entry *l3mdev_link_scope_lookup(struct net *net,
					   struct flowi6 *fl6)
{
	struct dst_entry *dst = NULL;
	struct net_device *dev;

	WARN_ON_ONCE(!rcu_read_lock_held());
	if (fl6->flowi6_oif) {
		dev = dev_get_by_index_rcu(net, fl6->flowi6_oif);
		if (dev && netif_is_l3_slave(dev))
			dev = netdev_master_upper_dev_get_rcu(dev);

		if (dev && netif_is_l3_master(dev) &&
		    dev->l3mdev_ops->l3mdev_link_scope_lookup)
			dst = dev->l3mdev_ops->l3mdev_link_scope_lookup(dev, fl6);
	}

	return dst;
}
EXPORT_SYMBOL_GPL(l3mdev_link_scope_lookup);

/**
 * l3mdev_fib_rule_match - Determine if flowi references an
 *                         L3 master device
 * @net: network namespace for device index lookup
 * @fl: flow struct
 * @arg: FIB lookup arg; the matched table id is stored in @arg->table
 */
int l3mdev_fib_rule_match(struct net *net, struct flowi *fl,
			  struct fib_lookup_arg *arg)
{
	struct net_device *dev;
	int rc = 0;

	rcu_read_lock();

	dev = dev_get_by_index_rcu(net, fl->flowi_oif);
	if (dev && netif_is_l3_master(dev) &&
	    dev->l3mdev_ops->l3mdev_fib_table) {
		arg->table = dev->l3mdev_ops->l3mdev_fib_table(dev);
		rc = 1;
		goto out;
	}

	dev = dev_get_by_index_rcu(net, fl->flowi_iif);
	if (dev && netif_is_l3_master(dev) &&
	    dev->l3mdev_ops->l3mdev_fib_table) {
		arg->table = dev->l3mdev_ops->l3mdev_fib_table(dev);
		rc = 1;
	}

out:
	rcu_read_unlock();

	return rc;
}

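/**
 * l3mdev_update_flow - redirect flow oif/iif to the L3 master device
 *                      when the referenced device is enslaved to one
 * @net: network namespace for device index lookup
 * @fl: flow struct to update
 */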
void l3mdev_update_flow(struct net *net, struct flowi *fl)
{
	struct net_device *dev;
	int ifindex;

	rcu_read_lock();

	if (fl->flowi_oif) {
		dev = dev_get_by_index_rcu(net, fl->flowi_oif);
		if (dev) {
			ifindex = l3mdev_master_ifindex_rcu(dev);
			if (ifindex) {
				fl->flowi_oif = ifindex;
				fl->flowi_flags |= FLOWI_FLAG_SKIP_NH_OIF;
				goto out;
			}
		}
	}

	if (fl->flowi_iif) {
		dev = dev_get_by_index_rcu(net, fl->flowi_iif);
		if (dev) {
			ifindex = l3mdev_master_ifindex_rcu(dev);
			if (ifindex) {
				fl->flowi_iif = ifindex;
				fl->flowi_flags |= FLOWI_FLAG_SKIP_NH_OIF;
			}
		}
	}

out:
	rcu_read_unlock();
}
EXPORT_SYMBOL_GPL(l3mdev_update_flow);
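
/* Effect of the rewrite above (illustrative, assuming a device enslaved
 * to an L3 master such as a VRF): a flow whose flowi_oif or flowi_iif
 * names the slave device leaves l3mdev_update_flow() with that field
 * pointing at the master device and FLOWI_FLAG_SKIP_NH_OIF set, so FIB
 * rules matching the master (see l3mdev_fib_rule_match() above) direct
 * the lookup to the master's table.
 */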