// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * net/sched/act_mirred.c	packet mirroring and redirect actions
 *
 * Authors:	Jamal Hadi Salim (2002-4)
 *
 * TODO: Add ingress support (and socket redirect support)
 */

#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/errno.h>
#include <linux/skbuff.h>
#include <linux/rtnetlink.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/gfp.h>
#include <linux/if_arp.h>
#include <net/net_namespace.h>
#include <net/netlink.h>
#include <net/dst.h>
#include <net/pkt_sched.h>
#include <net/pkt_cls.h>
#include <linux/tc_act/tc_mirred.h>
#include <net/tc_act/tc_mirred.h>

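/* Every mirred instance is kept on this list so the netdevice
 * notifier below can clear its device reference when the target
 * device is unregistered.
 */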
static LIST_HEAD(mirred_list);
static DEFINE_SPINLOCK(mirred_list_lock);

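/* Mirred actions can invoke one another (e.g. two devices that each
 * redirect into the other), so a per-CPU depth counter bounds nested
 * calls of tcf_mirred_act() and breaks such loops by dropping.
 */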
#define MIRRED_RECURSION_LIMIT	4
static DEFINE_PER_CPU(unsigned int, mirred_rec_level);

static bool tcf_mirred_is_act_redirect(int action)
{
	return action == TCA_EGRESS_REDIR || action == TCA_INGRESS_REDIR;
}

static bool tcf_mirred_act_wants_ingress(int action)
{
	switch (action) {
	case TCA_EGRESS_REDIR:
	case TCA_EGRESS_MIRROR:
		return false;
	case TCA_INGRESS_REDIR:
	case TCA_INGRESS_MIRROR:
		return true;
	default:
		BUG();
	}
}

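/* An ingress redirect may hand the original skb back to the caller
 * instead of cloning it, but only for filter verdicts that guarantee
 * the caller will not touch the skb afterwards.
 */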
static bool tcf_mirred_can_reinsert(int action)
{
	switch (action) {
	case TC_ACT_SHOT:
	case TC_ACT_STOLEN:
	case TC_ACT_QUEUED:
	case TC_ACT_TRAP:
		return true;
	}
	return false;
}

static struct net_device *tcf_mirred_dev_dereference(struct tcf_mirred *m)
{
	return rcu_dereference_protected(m->tcfm_dev,
					 lockdep_is_held(&m->tcf_lock));
}

static void tcf_mirred_release(struct tc_action *a)
{
	struct tcf_mirred *m = to_mirred(a);
	struct net_device *dev;

	spin_lock(&mirred_list_lock);
	list_del(&m->tcfm_list);
	spin_unlock(&mirred_list_lock);

	/* last reference to action, no need to lock */
	dev = rcu_dereference_protected(m->tcfm_dev, 1);
	if (dev)
		dev_put(dev);
}

static const struct nla_policy mirred_policy[TCA_MIRRED_MAX + 1] = {
	[TCA_MIRRED_PARMS]	= { .len = sizeof(struct tc_mirred) },
};

static unsigned int mirred_net_id;
static struct tc_action_ops act_mirred_ops;

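/* Create a new mirred action or update an existing one from netlink
 * attributes: validate the requested eaction, look up the target
 * device by ifindex and swap in the new device reference under
 * tcf_lock.
 */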
static int tcf_mirred_init(struct net *net, struct nlattr *nla,
			   struct nlattr *est, struct tc_action **a,
			   int ovr, int bind, bool rtnl_held,
			   struct tcf_proto *tp,
			   u32 flags, struct netlink_ext_ack *extack)
{
	struct tc_action_net *tn = net_generic(net, mirred_net_id);
	struct nlattr *tb[TCA_MIRRED_MAX + 1];
	struct tcf_chain *goto_ch = NULL;
	bool mac_header_xmit = false;
	struct tc_mirred *parm;
	struct tcf_mirred *m;
	struct net_device *dev;
	bool exists = false;
	int ret, err;
	u32 index;

	if (!nla) {
		NL_SET_ERR_MSG_MOD(extack, "Mirred requires attributes to be passed");
		return -EINVAL;
	}
	ret = nla_parse_nested_deprecated(tb, TCA_MIRRED_MAX, nla,
					  mirred_policy, extack);
	if (ret < 0)
		return ret;
	if (!tb[TCA_MIRRED_PARMS]) {
		NL_SET_ERR_MSG_MOD(extack, "Missing required mirred parameters");
		return -EINVAL;
	}
	parm = nla_data(tb[TCA_MIRRED_PARMS]);
	index = parm->index;
	err = tcf_idr_check_alloc(tn, &index, a, bind);
	if (err < 0)
		return err;
	exists = err;
	if (exists && bind)
		return 0;

	switch (parm->eaction) {
	case TCA_EGRESS_MIRROR:
	case TCA_EGRESS_REDIR:
	case TCA_INGRESS_REDIR:
	case TCA_INGRESS_MIRROR:
		break;
	default:
		if (exists)
			tcf_idr_release(*a, bind);
		else
			tcf_idr_cleanup(tn, index);
		NL_SET_ERR_MSG_MOD(extack, "Unknown mirred option");
		return -EINVAL;
	}

	if (!exists) {
		if (!parm->ifindex) {
			tcf_idr_cleanup(tn, index);
			NL_SET_ERR_MSG_MOD(extack, "Specified device does not exist");
			return -EINVAL;
		}
		ret = tcf_idr_create_from_flags(tn, index, est, a,
						&act_mirred_ops, bind, flags);
		if (ret) {
			tcf_idr_cleanup(tn, index);
			return ret;
		}
		ret = ACT_P_CREATED;
	} else if (!ovr) {
		tcf_idr_release(*a, bind);
		return -EEXIST;
	}

	m = to_mirred(*a);
	if (ret == ACT_P_CREATED)
		INIT_LIST_HEAD(&m->tcfm_list);

	err = tcf_action_check_ctrlact(parm->action, tp, &goto_ch, extack);
	if (err < 0)
		goto release_idr;

	spin_lock_bh(&m->tcf_lock);

	if (parm->ifindex) {
		dev = dev_get_by_index(net, parm->ifindex);
		if (!dev) {
			spin_unlock_bh(&m->tcf_lock);
			err = -ENODEV;
			goto put_chain;
		}
		mac_header_xmit = dev_is_mac_header_xmit(dev);
		dev = rcu_replace_pointer(m->tcfm_dev, dev,
					  lockdep_is_held(&m->tcf_lock));
		if (dev)
			dev_put(dev);
		m->tcfm_mac_header_xmit = mac_header_xmit;
	}
	goto_ch = tcf_action_set_ctrlact(*a, parm->action, goto_ch);
	m->tcfm_eaction = parm->eaction;
	spin_unlock_bh(&m->tcf_lock);
	if (goto_ch)
		tcf_chain_put_by_act(goto_ch);

	if (ret == ACT_P_CREATED) {
		spin_lock(&mirred_list_lock);
		list_add(&m->tcfm_list, &mirred_list);
		spin_unlock(&mirred_list_lock);
	}

	return ret;
put_chain:
	if (goto_ch)
		tcf_chain_put_by_act(goto_ch);
release_idr:
	tcf_idr_release(*a, bind);
	return err;
}

static int tcf_mirred_act(struct sk_buff *skb, const struct tc_action *a,
			  struct tcf_result *res)
{
	struct tcf_mirred *m = to_mirred(a);
	struct sk_buff *skb2 = skb;
	bool m_mac_header_xmit;
	struct net_device *dev;
	unsigned int rec_level;
	int retval, err = 0;
	bool use_reinsert;
	bool want_ingress;
	bool is_redirect;
	bool expects_nh;
	bool at_ingress;
	int m_eaction;
	int mac_len;
	bool at_nh;

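	/* Bump the per-CPU depth counter first; if this packet already went
	 * through too many nested mirred actions, assume a loop and drop.
	 */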
	rec_level = __this_cpu_inc_return(mirred_rec_level);
	if (unlikely(rec_level > MIRRED_RECURSION_LIMIT)) {
		net_warn_ratelimited("Packet exceeded mirred recursion limit on dev %s\n",
				     netdev_name(skb->dev));
		__this_cpu_dec(mirred_rec_level);
		return TC_ACT_SHOT;
	}

	tcf_lastuse_update(&m->tcf_tm);
	tcf_action_update_bstats(&m->common, skb);

	m_mac_header_xmit = READ_ONCE(m->tcfm_mac_header_xmit);
	m_eaction = READ_ONCE(m->tcfm_eaction);
	retval = READ_ONCE(m->tcf_action);
	dev = rcu_dereference_bh(m->tcfm_dev);
	if (unlikely(!dev)) {
		pr_notice_once("tc mirred: target device is gone\n");
		goto out;
	}

	if (unlikely(!(dev->flags & IFF_UP))) {
		net_notice_ratelimited("tc mirred to Houston: device %s is down\n",
				       dev->name);
		goto out;
	}

	/* we could easily avoid the clone only if called by ingress and clsact;
	 * since we can't easily detect the clsact caller, skip clone only for
	 * ingress - that covers the TC S/W datapath.
	 */
	is_redirect = tcf_mirred_is_act_redirect(m_eaction);
	at_ingress = skb_at_tc_ingress(skb);
	use_reinsert = at_ingress && is_redirect &&
		       tcf_mirred_can_reinsert(retval);
	if (!use_reinsert) {
		skb2 = skb_clone(skb, GFP_ATOMIC);
		if (!skb2)
			goto out;
	}

	want_ingress = tcf_mirred_act_wants_ingress(m_eaction);

	/* All mirred/redirected skbs should clear previous ct info */
	nf_reset_ct(skb2);
	if (want_ingress && !at_ingress) /* drop dst for egress -> ingress */
		skb_dst_drop(skb2);

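	/* Align skb->data with what the receiving side expects: the ingress
	 * path and devices that do not transmit a MAC header want the data
	 * to start at the network header, everything else wants the MAC
	 * header back in place.
	 */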
	expects_nh = want_ingress || !m_mac_header_xmit;
	at_nh = skb->data == skb_network_header(skb);
	if (at_nh != expects_nh) {
		mac_len = skb_at_tc_ingress(skb) ? skb->mac_len :
			  skb_network_header(skb) - skb_mac_header(skb);
		if (expects_nh) {
			/* target device/action expect data at nh */
			skb_pull_rcsum(skb2, mac_len);
		} else {
			/* target device/action expect data at mac */
			skb_push_rcsum(skb2, mac_len);
		}
	}

	skb2->skb_iif = skb->dev->ifindex;
	skb2->dev = dev;

	/* mirror is always swallowed */
	if (is_redirect) {
		skb_set_redirected(skb2, skb2->tc_at_ingress);

		/* let the caller reinsert the packet, if possible */
		if (use_reinsert) {
			res->ingress = want_ingress;
			if (skb_tc_reinsert(skb, res))
				tcf_action_inc_overlimit_qstats(&m->common);
			__this_cpu_dec(mirred_rec_level);
			return TC_ACT_CONSUMED;
		}
	}

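	/* Hand the skb (or its clone) to the target device: egress goes
	 * through the device's qdisc via dev_queue_xmit(), ingress is fed
	 * back into the stack via netif_receive_skb().
	 */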
	if (!want_ingress)
		err = dev_queue_xmit(skb2);
	else
		err = netif_receive_skb(skb2);

	if (err) {
out:
		tcf_action_inc_overlimit_qstats(&m->common);
		if (tcf_mirred_is_act_redirect(m_eaction))
			retval = TC_ACT_SHOT;
	}
	__this_cpu_dec(mirred_rec_level);

	return retval;
}

static void tcf_stats_update(struct tc_action *a, u64 bytes, u64 packets,
			     u64 drops, u64 lastuse, bool hw)
{
	struct tcf_mirred *m = to_mirred(a);
	struct tcf_t *tm = &m->tcf_tm;

	tcf_action_update_stats(a, bytes, packets, drops, hw);
	tm->lastuse = max_t(u64, tm->lastuse, lastuse);
}

static int tcf_mirred_dump(struct sk_buff *skb, struct tc_action *a, int bind,
			   int ref)
{
	unsigned char *b = skb_tail_pointer(skb);
	struct tcf_mirred *m = to_mirred(a);
	struct tc_mirred opt = {
		.index   = m->tcf_index,
		.refcnt  = refcount_read(&m->tcf_refcnt) - ref,
		.bindcnt = atomic_read(&m->tcf_bindcnt) - bind,
	};
	struct net_device *dev;
	struct tcf_t t;

	spin_lock_bh(&m->tcf_lock);
	opt.action = m->tcf_action;
	opt.eaction = m->tcfm_eaction;
	dev = tcf_mirred_dev_dereference(m);
	if (dev)
		opt.ifindex = dev->ifindex;

	if (nla_put(skb, TCA_MIRRED_PARMS, sizeof(opt), &opt))
		goto nla_put_failure;

	tcf_tm_dump(&t, &m->tcf_tm);
	if (nla_put_64bit(skb, TCA_MIRRED_TM, sizeof(t), &t, TCA_MIRRED_PAD))
		goto nla_put_failure;
	spin_unlock_bh(&m->tcf_lock);

	return skb->len;

nla_put_failure:
	spin_unlock_bh(&m->tcf_lock);
	nlmsg_trim(skb, b);
	return -1;
}

static int tcf_mirred_walker(struct net *net, struct sk_buff *skb,
			     struct netlink_callback *cb, int type,
			     const struct tc_action_ops *ops,
			     struct netlink_ext_ack *extack)
{
	struct tc_action_net *tn = net_generic(net, mirred_net_id);

	return tcf_generic_walker(tn, skb, cb, type, ops, extack);
}

static int tcf_mirred_search(struct net *net, struct tc_action **a, u32 index)
{
	struct tc_action_net *tn = net_generic(net, mirred_net_id);

	return tcf_idr_search(tn, a, index);
}

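/* On NETDEV_UNREGISTER, walk all mirred instances and drop any stale
 * reference to the departing device; the fast path then sees a NULL
 * target and counts the packet against overlimits.
 */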
static int mirred_device_event(struct notifier_block *unused,
			       unsigned long event, void *ptr)
{
	struct net_device *dev = netdev_notifier_info_to_dev(ptr);
	struct tcf_mirred *m;

	ASSERT_RTNL();
	if (event == NETDEV_UNREGISTER) {
		spin_lock(&mirred_list_lock);
		list_for_each_entry(m, &mirred_list, tcfm_list) {
			spin_lock_bh(&m->tcf_lock);
			if (tcf_mirred_dev_dereference(m) == dev) {
				dev_put(dev);
				/* Note: no RCU grace period necessary, as
				 * net_device is already RCU protected.
				 */
				RCU_INIT_POINTER(m->tcfm_dev, NULL);
			}
			spin_unlock_bh(&m->tcf_lock);
		}
		spin_unlock(&mirred_list_lock);
	}

	return NOTIFY_DONE;
}

static struct notifier_block mirred_device_notifier = {
	.notifier_call = mirred_device_event,
};

static void tcf_mirred_dev_put(void *priv)
{
	struct net_device *dev = priv;

	dev_put(dev);
}

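/* Callers of ->get_dev (e.g. offload code) may hold the target device
 * beyond this RCU section; hand them a reference together with the
 * destructor that releases it.
 */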
static struct net_device *
tcf_mirred_get_dev(const struct tc_action *a,
		   tc_action_priv_destructor *destructor)
{
	struct tcf_mirred *m = to_mirred(a);
	struct net_device *dev;

	rcu_read_lock();
	dev = rcu_dereference(m->tcfm_dev);
	if (dev) {
		dev_hold(dev);
		*destructor = tcf_mirred_dev_put;
	}
	rcu_read_unlock();

	return dev;
}

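/* Dump size estimate: only the fixed-size TCA_MIRRED_PARMS payload is
 * accounted for here.
 */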
static size_t tcf_mirred_get_fill_size(const struct tc_action *act)
{
	return nla_total_size(sizeof(struct tc_mirred));
}

static struct tc_action_ops act_mirred_ops = {
	.kind		= "mirred",
	.id		= TCA_ID_MIRRED,
	.owner		= THIS_MODULE,
	.act		= tcf_mirred_act,
	.stats_update	= tcf_stats_update,
	.dump		= tcf_mirred_dump,
	.cleanup	= tcf_mirred_release,
	.init		= tcf_mirred_init,
	.walk		= tcf_mirred_walker,
	.lookup		= tcf_mirred_search,
	.get_fill_size	= tcf_mirred_get_fill_size,
	.size		= sizeof(struct tcf_mirred),
	.get_dev	= tcf_mirred_get_dev,
};

static __net_init int mirred_init_net(struct net *net)
{
	struct tc_action_net *tn = net_generic(net, mirred_net_id);

	return tc_action_net_init(net, tn, &act_mirred_ops);
}

static void __net_exit mirred_exit_net(struct list_head *net_list)
{
	tc_action_net_exit(net_list, mirred_net_id);
}

static struct pernet_operations mirred_net_ops = {
	.init = mirred_init_net,
	.exit_batch = mirred_exit_net,
	.id   = &mirred_net_id,
	.size = sizeof(struct tc_action_net),
};

MODULE_AUTHOR("Jamal Hadi Salim (2002)");
MODULE_DESCRIPTION("Device Mirror/redirect actions");
MODULE_LICENSE("GPL");

static int __init mirred_init_module(void)
{
	int err = register_netdevice_notifier(&mirred_device_notifier);

	if (err)
		return err;

	pr_info("Mirror/redirect action on\n");
	err = tcf_register_action(&act_mirred_ops, &mirred_net_ops);
	if (err)
		unregister_netdevice_notifier(&mirred_device_notifier);

	return err;
}

static void __exit mirred_cleanup_module(void)
{
	tcf_unregister_action(&act_mirred_ops, &mirred_net_ops);
	unregister_netdevice_notifier(&mirred_device_notifier);
}

module_init(mirred_init_module);
module_exit(mirred_cleanup_module);