/* Linux multicast routing support
 * Common logic shared by IPv4 [ipmr] and IPv6 [ip6mr] implementations
 */

#include <linux/rhashtable.h>
#include <linux/mroute_base.h>

/* Sets everything common except 'dev', since that is done under locking */
void vif_device_init(struct vif_device *v,
		     struct net_device *dev,
		     unsigned long rate_limit,
		     unsigned char threshold,
		     unsigned short flags,
		     unsigned short get_iflink_mask)
{
	v->dev = NULL;
	v->bytes_in = 0;
	v->bytes_out = 0;
	v->pkt_in = 0;
	v->pkt_out = 0;
	v->rate_limit = rate_limit;
	v->flags = flags;
	v->threshold = threshold;
	if (v->flags & get_iflink_mask)
		v->link = dev_get_iflink(dev);
	else
		v->link = dev->ifindex;
}
EXPORT_SYMBOL(vif_device_init);
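
/* Illustrative sketch only (real callers live in ipmr.c / ip6mr.c): a
 * family-specific vif_add()-style helper is expected to fill in the common
 * fields first and only then publish 'dev' under the table lock, roughly:
 *
 *	vif_device_init(v, dev, vifc->vifc_rate_limit, vifc->vifc_threshold,
 *			vifc->vifc_flags | (!mrtsock ? VIFF_STATIC : 0),
 *			VIFF_TUNNEL | VIFF_REGISTER);
 *	write_lock_bh(&mrt_lock);
 *	v->dev = dev;
 *	write_unlock_bh(&mrt_lock);
 *
 * The vifc_* field names, flag mask and lock shown above follow the IPv4
 * caller's conventions and are given only as an example.
 */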

struct mr_table *
mr_table_alloc(struct net *net, u32 id,
	       struct mr_table_ops *ops,
	       void (*expire_func)(struct timer_list *t),
	       void (*table_set)(struct mr_table *mrt,
				 struct net *net))
{
	struct mr_table *mrt;
	int err;

	mrt = kzalloc(sizeof(*mrt), GFP_KERNEL);
	if (!mrt)
		return ERR_PTR(-ENOMEM);
	mrt->id = id;
	write_pnet(&mrt->net, net);

	mrt->ops = *ops;
	err = rhltable_init(&mrt->mfc_hash, mrt->ops.rht_params);
	if (err) {
		kfree(mrt);
		return ERR_PTR(err);
	}
	INIT_LIST_HEAD(&mrt->mfc_cache_list);
	INIT_LIST_HEAD(&mrt->mfc_unres_queue);

	timer_setup(&mrt->ipmr_expire_timer, expire_func, 0);

	mrt->mroute_reg_vif_num = -1;
	table_set(mrt, net);
	return mrt;
}
EXPORT_SYMBOL(mr_table_alloc);
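
/* Illustrative sketch only: each family allocates its tables through this
 * helper, passing its own rhashtable parameters, expiry handler and
 * registration callback. An IPv4-style caller would look roughly like:
 *
 *	mrt = mr_table_alloc(net, id, &ipmr_mr_table_ops,
 *			     ipmr_expire_process, ipmr_new_table_set);
 *	if (IS_ERR(mrt))
 *		return mrt;
 *
 * ipmr_mr_table_ops, ipmr_expire_process and ipmr_new_table_set are the
 * IPv4 implementation's identifiers and are shown here only as an example.
 */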

void *mr_mfc_find_parent(struct mr_table *mrt, void *hasharg, int parent)
{
	struct rhlist_head *tmp, *list;
	struct mr_mfc *c;

	list = rhltable_lookup(&mrt->mfc_hash, hasharg, *mrt->ops.rht_params);
	rhl_for_each_entry_rcu(c, tmp, list, mnode)
		if (parent == -1 || parent == c->mfc_parent)
			return c;

	return NULL;
}
EXPORT_SYMBOL(mr_mfc_find_parent);
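
/* Illustrative sketch only: 'hasharg' is an opaque, family-specific key that
 * must match ops.rht_params, and the caller must be in an RCU read-side
 * critical section. An IPv4-style lookup would look roughly like:
 *
 *	struct mfc_cache_cmp_arg arg = {
 *		.mfc_mcastgrp = mcastgrp,
 *		.mfc_origin = origin,
 *	};
 *
 *	c = mr_mfc_find_parent(mrt, &arg, -1);
 *
 * The mfc_cache_cmp_arg layout above follows the IPv4 caller and is only an
 * example of what 'hasharg' may point to; a parent of -1 matches any entry.
 */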

void *mr_mfc_find_any_parent(struct mr_table *mrt, int vifi)
{
	struct rhlist_head *tmp, *list;
	struct mr_mfc *c;

	list = rhltable_lookup(&mrt->mfc_hash, mrt->ops.cmparg_any,
			       *mrt->ops.rht_params);
	rhl_for_each_entry_rcu(c, tmp, list, mnode)
		if (c->mfc_un.res.ttls[vifi] < 255)
			return c;

	return NULL;
}
EXPORT_SYMBOL(mr_mfc_find_any_parent);

void *mr_mfc_find_any(struct mr_table *mrt, int vifi, void *hasharg)
{
	struct rhlist_head *tmp, *list;
	struct mr_mfc *c, *proxy;

	list = rhltable_lookup(&mrt->mfc_hash, hasharg, *mrt->ops.rht_params);
	rhl_for_each_entry_rcu(c, tmp, list, mnode) {
		if (c->mfc_un.res.ttls[vifi] < 255)
			return c;

		/* It's ok if the vifi is part of the static tree */
		proxy = mr_mfc_find_any_parent(mrt, c->mfc_parent);
		if (proxy && proxy->mfc_un.res.ttls[vifi] < 255)
			return c;
	}

	return mr_mfc_find_any_parent(mrt, vifi);
}
EXPORT_SYMBOL(mr_mfc_find_any);

#ifdef CONFIG_PROC_FS
void *mr_vif_seq_idx(struct net *net, struct mr_vif_iter *iter, loff_t pos)
{
	struct mr_table *mrt = iter->mrt;

	for (iter->ct = 0; iter->ct < mrt->maxvif; ++iter->ct) {
		if (!VIF_EXISTS(mrt, iter->ct))
			continue;
		if (pos-- == 0)
			return &mrt->vif_table[iter->ct];
	}
	return NULL;
}
EXPORT_SYMBOL(mr_vif_seq_idx);

void *mr_vif_seq_next(struct seq_file *seq, void *v, loff_t *pos)
{
	struct mr_vif_iter *iter = seq->private;
	struct net *net = seq_file_net(seq);
	struct mr_table *mrt = iter->mrt;

	++*pos;
	if (v == SEQ_START_TOKEN)
		return mr_vif_seq_idx(net, iter, 0);

	while (++iter->ct < mrt->maxvif) {
		if (!VIF_EXISTS(mrt, iter->ct))
			continue;
		return &mrt->vif_table[iter->ct];
	}
	return NULL;
}
EXPORT_SYMBOL(mr_vif_seq_next);

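/* Note on locking for the MFC seq helpers below (descriptive, added here as
 * an assumption about the matching *_seq_stop helper in mroute_base.h): when
 * these return an entry from mfc_cache_list the RCU read lock taken here is
 * still held, and when they return an entry from mfc_unres_queue the it->lock
 * spinlock is still held; whichever is held is expected to be dropped by the
 * family's seq_stop callback based on the value of it->cache.
 */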
void *mr_mfc_seq_idx(struct net *net,
		     struct mr_mfc_iter *it, loff_t pos)
{
	struct mr_table *mrt = it->mrt;
	struct mr_mfc *mfc;

	rcu_read_lock();
	it->cache = &mrt->mfc_cache_list;
	list_for_each_entry_rcu(mfc, &mrt->mfc_cache_list, list)
		if (pos-- == 0)
			return mfc;
	rcu_read_unlock();

	spin_lock_bh(it->lock);
	it->cache = &mrt->mfc_unres_queue;
	list_for_each_entry(mfc, it->cache, list)
		if (pos-- == 0)
			return mfc;
	spin_unlock_bh(it->lock);

	it->cache = NULL;
	return NULL;
}
EXPORT_SYMBOL(mr_mfc_seq_idx);

void *mr_mfc_seq_next(struct seq_file *seq, void *v,
		      loff_t *pos)
{
	struct mr_mfc_iter *it = seq->private;
	struct net *net = seq_file_net(seq);
	struct mr_table *mrt = it->mrt;
	struct mr_mfc *c = v;

	++*pos;

	if (v == SEQ_START_TOKEN)
		return mr_mfc_seq_idx(net, seq->private, 0);

	if (c->list.next != it->cache)
		return list_entry(c->list.next, struct mr_mfc, list);

	if (it->cache == &mrt->mfc_unres_queue)
		goto end_of_list;

	/* exhausted cache_array, show unresolved */
	rcu_read_unlock();
	it->cache = &mrt->mfc_unres_queue;

	spin_lock_bh(it->lock);
	if (!list_empty(it->cache))
		return list_first_entry(it->cache, struct mr_mfc, list);

end_of_list:
	spin_unlock_bh(it->lock);
	it->cache = NULL;

	return NULL;
}
EXPORT_SYMBOL(mr_mfc_seq_next);
#endif

int mr_fill_mroute(struct mr_table *mrt, struct sk_buff *skb,
		   struct mr_mfc *c, struct rtmsg *rtm)
{
	struct rta_mfc_stats mfcs;
	struct nlattr *mp_attr;
	struct rtnexthop *nhp;
	unsigned long lastuse;
	int ct;

	/* If cache is unresolved, don't try to parse IIF and OIF */
	if (c->mfc_parent >= MAXVIFS) {
		rtm->rtm_flags |= RTNH_F_UNRESOLVED;
		return -ENOENT;
	}

	if (VIF_EXISTS(mrt, c->mfc_parent) &&
	    nla_put_u32(skb, RTA_IIF,
			mrt->vif_table[c->mfc_parent].dev->ifindex) < 0)
		return -EMSGSIZE;

	if (c->mfc_flags & MFC_OFFLOAD)
		rtm->rtm_flags |= RTNH_F_OFFLOAD;

	mp_attr = nla_nest_start_noflag(skb, RTA_MULTIPATH);
	if (!mp_attr)
		return -EMSGSIZE;

	for (ct = c->mfc_un.res.minvif; ct < c->mfc_un.res.maxvif; ct++) {
		if (VIF_EXISTS(mrt, ct) && c->mfc_un.res.ttls[ct] < 255) {
			struct vif_device *vif;

			nhp = nla_reserve_nohdr(skb, sizeof(*nhp));
			if (!nhp) {
				nla_nest_cancel(skb, mp_attr);
				return -EMSGSIZE;
			}

			nhp->rtnh_flags = 0;
			nhp->rtnh_hops = c->mfc_un.res.ttls[ct];
			vif = &mrt->vif_table[ct];
			nhp->rtnh_ifindex = vif->dev->ifindex;
			nhp->rtnh_len = sizeof(*nhp);
		}
	}

	nla_nest_end(skb, mp_attr);

	lastuse = READ_ONCE(c->mfc_un.res.lastuse);
	lastuse = time_after_eq(jiffies, lastuse) ? jiffies - lastuse : 0;

	mfcs.mfcs_packets = c->mfc_un.res.pkt;
	mfcs.mfcs_bytes = c->mfc_un.res.bytes;
	mfcs.mfcs_wrong_if = c->mfc_un.res.wrong_if;
	if (nla_put_64bit(skb, RTA_MFC_STATS, sizeof(mfcs), &mfcs, RTA_PAD) ||
	    nla_put_u64_64bit(skb, RTA_EXPIRES, jiffies_to_clock_t(lastuse),
			      RTA_PAD))
		return -EMSGSIZE;

	rtm->rtm_type = RTN_MULTICAST;
	return 1;
}
EXPORT_SYMBOL(mr_fill_mroute);
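
/* Illustrative sketch only: a family-specific netlink fill routine is
 * expected to have opened the rtmsg header already and then delegate the
 * shared multicast attributes to mr_fill_mroute(), roughly:
 *
 *	err = mr_fill_mroute(mrt, skb, &c->_c, rtm);
 *	if (err < 0 && err != -ENOENT)
 *		goto nla_put_failure;
 *
 * Here &c->_c is how the IPv4 struct mfc_cache embeds the common struct
 * mr_mfc; a return of 1 means the attributes were filled, -ENOENT marks an
 * unresolved entry (dumps usually skip it rather than fail) and -EMSGSIZE
 * signals attribute overflow.
 */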

static bool mr_mfc_uses_dev(const struct mr_table *mrt,
			    const struct mr_mfc *c,
			    const struct net_device *dev)
{
	int ct;

	for (ct = c->mfc_un.res.minvif; ct < c->mfc_un.res.maxvif; ct++) {
		if (VIF_EXISTS(mrt, ct) && c->mfc_un.res.ttls[ct] < 255) {
			const struct vif_device *vif;

			vif = &mrt->vif_table[ct];
			if (vif->dev == dev)
				return true;
		}
	}
	return false;
}

int mr_table_dump(struct mr_table *mrt, struct sk_buff *skb,
		  struct netlink_callback *cb,
		  int (*fill)(struct mr_table *mrt, struct sk_buff *skb,
			      u32 portid, u32 seq, struct mr_mfc *c,
			      int cmd, int flags),
		  spinlock_t *lock, struct fib_dump_filter *filter)
{
	unsigned int e = 0, s_e = cb->args[1];
	unsigned int flags = NLM_F_MULTI;
	struct mr_mfc *mfc;
	int err;

	if (filter->filter_set)
		flags |= NLM_F_DUMP_FILTERED;

	list_for_each_entry_rcu(mfc, &mrt->mfc_cache_list, list) {
		if (e < s_e)
			goto next_entry;
		if (filter->dev &&
		    !mr_mfc_uses_dev(mrt, mfc, filter->dev))
			goto next_entry;

		err = fill(mrt, skb, NETLINK_CB(cb->skb).portid,
			   cb->nlh->nlmsg_seq, mfc, RTM_NEWROUTE, flags);
		if (err < 0)
			goto out;
next_entry:
		e++;
	}

	spin_lock_bh(lock);
	list_for_each_entry(mfc, &mrt->mfc_unres_queue, list) {
		if (e < s_e)
			goto next_entry2;
		if (filter->dev &&
		    !mr_mfc_uses_dev(mrt, mfc, filter->dev))
			goto next_entry2;

		err = fill(mrt, skb, NETLINK_CB(cb->skb).portid,
			   cb->nlh->nlmsg_seq, mfc, RTM_NEWROUTE, flags);
		if (err < 0) {
			spin_unlock_bh(lock);
			goto out;
		}
next_entry2:
		e++;
	}
	spin_unlock_bh(lock);
	err = 0;
out:
	cb->args[1] = e;
	return err;
}
EXPORT_SYMBOL(mr_table_dump);

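/* Dump resumption (descriptive note): cb->args[1] records how many MFC
 * entries, resolved and unresolved combined, have already been emitted for
 * the current table, so a partial dump restarts from that offset, while
 * mr_rtm_dumproute() below uses cb->args[0] the same way for tables.
 */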
int mr_rtm_dumproute(struct sk_buff *skb, struct netlink_callback *cb,
		     struct mr_table *(*iter)(struct net *net,
					      struct mr_table *mrt),
		     int (*fill)(struct mr_table *mrt,
				 struct sk_buff *skb,
				 u32 portid, u32 seq, struct mr_mfc *c,
				 int cmd, int flags),
		     spinlock_t *lock, struct fib_dump_filter *filter)
{
	unsigned int t = 0, s_t = cb->args[0];
	struct net *net = sock_net(skb->sk);
	struct mr_table *mrt;
	int err;

	/* multicast does not track protocol or have a route type other
	 * than RTN_MULTICAST
	 */
	if (filter->filter_set) {
		if (filter->protocol || filter->flags ||
		    (filter->rt_type && filter->rt_type != RTN_MULTICAST))
			return skb->len;
	}

	rcu_read_lock();
	for (mrt = iter(net, NULL); mrt; mrt = iter(net, mrt)) {
		if (t < s_t)
			goto next_table;

		err = mr_table_dump(mrt, skb, cb, fill, lock, filter);
		if (err < 0)
			break;
		cb->args[1] = 0;
next_table:
		t++;
	}
	rcu_read_unlock();

	cb->args[0] = t;

	return skb->len;
}
EXPORT_SYMBOL(mr_rtm_dumproute);

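/* Notifier replay (descriptive note): mr_dump() walks every table of the
 * given family and replays its current state to a freshly registered FIB
 * notifier block, first the policy rules via rules_dump(), then a
 * FIB_EVENT_VIF_ADD per live VIF and a FIB_EVENT_ENTRY_ADD per resolved
 * MFC entry, stopping at the first error.
 */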
int mr_dump(struct net *net, struct notifier_block *nb, unsigned short family,
	    int (*rules_dump)(struct net *net,
			      struct notifier_block *nb,
			      struct netlink_ext_ack *extack),
	    struct mr_table *(*mr_iter)(struct net *net,
					struct mr_table *mrt),
	    rwlock_t *mrt_lock,
	    struct netlink_ext_ack *extack)
{
	struct mr_table *mrt;
	int err;

	err = rules_dump(net, nb, extack);
	if (err)
		return err;

	for (mrt = mr_iter(net, NULL); mrt; mrt = mr_iter(net, mrt)) {
		struct vif_device *v = &mrt->vif_table[0];
		struct mr_mfc *mfc;
		int vifi;

		/* Notify on table VIF entries */
		read_lock(mrt_lock);
		for (vifi = 0; vifi < mrt->maxvif; vifi++, v++) {
			if (!v->dev)
				continue;

			err = mr_call_vif_notifier(nb, family,
						   FIB_EVENT_VIF_ADD,
						   v, vifi, mrt->id, extack);
			if (err)
				break;
		}
		read_unlock(mrt_lock);

		if (err)
			return err;

		/* Notify on table MFC entries */
		list_for_each_entry_rcu(mfc, &mrt->mfc_cache_list, list) {
			err = mr_call_mfc_notifier(nb, family,
						   FIB_EVENT_ENTRY_ADD,
						   mfc, mrt->id, extack);
			if (err)
				return err;
		}
	}

	return 0;
}
EXPORT_SYMBOL(mr_dump);