// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * net/sched/act_skbmod.c  skb data modifier
 *
 * Copyright (c) 2016 Jamal Hadi Salim <jhs@mojatatu.com>
 */
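
/* The "skbmod" TC action rewrites the Ethernet header of matched packets:
 * it can overwrite the destination MAC, the source MAC and the ethertype,
 * or swap the source and destination MACs, as selected by the SKBMOD_F_*
 * flags carried in the netlink configuration.
 *
 * An illustrative (not normative) user-space invocation looks roughly like:
 *
 *   tc filter add dev eth0 parent 1: protocol ip prio 10 \
 *      u32 match ip protocol 1 0xff \
 *      action skbmod set dmac 02:12:13:14:15:16
 */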

#include <linux/module.h>
#include <linux/if_arp.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/skbuff.h>
#include <linux/rtnetlink.h>
#include <net/netlink.h>
#include <net/pkt_sched.h>
#include <net/pkt_cls.h>

#include <linux/tc_act/tc_skbmod.h>
#include <net/tc_act/tc_skbmod.h>

static unsigned int skbmod_net_id;
static struct tc_action_ops act_skbmod_ops;

#define MAX_EDIT_LEN ETH_HLEN
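
/* Packet path: called for every packet that hits the action.  The header is
 * edited in place once skb_ensure_writable() has made at least the first
 * MAX_EDIT_LEN bytes private to this skb; the parameters are read through
 * RCU (rcu_dereference_bh), so the fast path never takes d->tcf_lock.
 */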
static int tcf_skbmod_act(struct sk_buff *skb, const struct tc_action *a,
			  struct tcf_result *res)
{
	struct tcf_skbmod *d = to_skbmod(a);
	int action;
	struct tcf_skbmod_params *p;
	u64 flags;
	int err;

	tcf_lastuse_update(&d->tcf_tm);
	bstats_cpu_update(this_cpu_ptr(d->common.cpu_bstats), skb);

	action = READ_ONCE(d->tcf_action);
	if (unlikely(action == TC_ACT_SHOT))
		goto drop;

	if (!skb->dev || skb->dev->type != ARPHRD_ETHER)
		return action;

	/* XXX: if you are going to edit more fields beyond ethernet header
	 * (example when you add IP header replacement or vlan swap)
	 * then MAX_EDIT_LEN needs to change appropriately
	 */
	err = skb_ensure_writable(skb, MAX_EDIT_LEN);
	if (unlikely(err)) /* best policy is to drop on the floor */
		goto drop;

	p = rcu_dereference_bh(d->skbmod_p);
	flags = p->flags;
	if (flags & SKBMOD_F_DMAC)
		ether_addr_copy(eth_hdr(skb)->h_dest, p->eth_dst);
	if (flags & SKBMOD_F_SMAC)
		ether_addr_copy(eth_hdr(skb)->h_source, p->eth_src);
	if (flags & SKBMOD_F_ETYPE)
		eth_hdr(skb)->h_proto = p->eth_type;

	if (flags & SKBMOD_F_SWAPMAC) {
		u16 tmpaddr[ETH_ALEN / 2]; /* ether_addr_copy() requirement */
		/*XXX: I am sure we can come up with more efficient swapping*/
		ether_addr_copy((u8 *)tmpaddr, eth_hdr(skb)->h_dest);
		ether_addr_copy(eth_hdr(skb)->h_dest, eth_hdr(skb)->h_source);
		ether_addr_copy(eth_hdr(skb)->h_source, (u8 *)tmpaddr);
	}

	return action;

drop:
	qstats_overlimit_inc(this_cpu_ptr(d->common.cpu_qstats));
	return TC_ACT_SHOT;
}

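/* Netlink attribute policy: PARMS carries the fixed struct tc_skbmod
 * (index, action verdict and SKBMOD_F_* flags), DMAC/SMAC are 6-byte MAC
 * addresses, and ETYPE is a 16-bit ethertype supplied in host byte order
 * (it is converted with htons() when the parameters are installed).
 */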
static const struct nla_policy skbmod_policy[TCA_SKBMOD_MAX + 1] = {
	[TCA_SKBMOD_PARMS]	= { .len = sizeof(struct tc_skbmod) },
	[TCA_SKBMOD_DMAC]	= { .len = ETH_ALEN },
	[TCA_SKBMOD_SMAC]	= { .len = ETH_ALEN },
	[TCA_SKBMOD_ETYPE]	= { .type = NLA_U16 },
};

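/* Control path: parse the netlink attributes, create or update the action
 * instance in the per-netns IDR, and publish a freshly allocated
 * tcf_skbmod_params via rcu_assign_pointer() so the packet path switches
 * over without blocking; the old parameter block is freed after an RCU
 * grace period with kfree_rcu().
 */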
static int tcf_skbmod_init(struct net *net, struct nlattr *nla,
			   struct nlattr *est, struct tc_action **a,
			   int ovr, int bind, bool rtnl_held,
			   struct tcf_proto *tp, u32 flags,
			   struct netlink_ext_ack *extack)
{
	struct tc_action_net *tn = net_generic(net, skbmod_net_id);
	struct nlattr *tb[TCA_SKBMOD_MAX + 1];
	struct tcf_skbmod_params *p, *p_old;
	struct tcf_chain *goto_ch = NULL;
	struct tc_skbmod *parm;
	u32 lflags = 0, index;
	struct tcf_skbmod *d;
	bool exists = false;
	u8 *daddr = NULL;
	u8 *saddr = NULL;
	u16 eth_type = 0;
	int ret = 0, err;

	if (!nla)
		return -EINVAL;

	err = nla_parse_nested_deprecated(tb, TCA_SKBMOD_MAX, nla,
					  skbmod_policy, NULL);
	if (err < 0)
		return err;

	if (!tb[TCA_SKBMOD_PARMS])
		return -EINVAL;

	if (tb[TCA_SKBMOD_DMAC]) {
		daddr = nla_data(tb[TCA_SKBMOD_DMAC]);
		lflags |= SKBMOD_F_DMAC;
	}

	if (tb[TCA_SKBMOD_SMAC]) {
		saddr = nla_data(tb[TCA_SKBMOD_SMAC]);
		lflags |= SKBMOD_F_SMAC;
	}

	if (tb[TCA_SKBMOD_ETYPE]) {
		eth_type = nla_get_u16(tb[TCA_SKBMOD_ETYPE]);
		lflags |= SKBMOD_F_ETYPE;
	}

	parm = nla_data(tb[TCA_SKBMOD_PARMS]);
	index = parm->index;
	if (parm->flags & SKBMOD_F_SWAPMAC)
		lflags = SKBMOD_F_SWAPMAC;

	err = tcf_idr_check_alloc(tn, &index, a, bind);
	if (err < 0)
		return err;
	exists = err;
	if (exists && bind)
		return 0;

	if (!lflags) {
		if (exists)
			tcf_idr_release(*a, bind);
		else
			tcf_idr_cleanup(tn, index);
		return -EINVAL;
	}

	if (!exists) {
		ret = tcf_idr_create(tn, index, est, a,
				     &act_skbmod_ops, bind, true, 0);
		if (ret) {
			tcf_idr_cleanup(tn, index);
			return ret;
		}

		ret = ACT_P_CREATED;
	} else if (!ovr) {
		tcf_idr_release(*a, bind);
		return -EEXIST;
	}
	err = tcf_action_check_ctrlact(parm->action, tp, &goto_ch, extack);
	if (err < 0)
		goto release_idr;

	d = to_skbmod(*a);

	p = kzalloc(sizeof(struct tcf_skbmod_params), GFP_KERNEL);
	if (unlikely(!p)) {
		err = -ENOMEM;
		goto put_chain;
	}

	p->flags = lflags;

	if (ovr)
		spin_lock_bh(&d->tcf_lock);
	/* Protected by tcf_lock if overwriting existing action. */
	goto_ch = tcf_action_set_ctrlact(*a, parm->action, goto_ch);
	p_old = rcu_dereference_protected(d->skbmod_p, 1);

	if (lflags & SKBMOD_F_DMAC)
		ether_addr_copy(p->eth_dst, daddr);
	if (lflags & SKBMOD_F_SMAC)
		ether_addr_copy(p->eth_src, saddr);
	if (lflags & SKBMOD_F_ETYPE)
		p->eth_type = htons(eth_type);

	rcu_assign_pointer(d->skbmod_p, p);
	if (ovr)
		spin_unlock_bh(&d->tcf_lock);

	if (p_old)
		kfree_rcu(p_old, rcu);
	if (goto_ch)
		tcf_chain_put_by_act(goto_ch);

	return ret;
put_chain:
	if (goto_ch)
		tcf_chain_put_by_act(goto_ch);
release_idr:
	tcf_idr_release(*a, bind);
	return err;
}

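/* Called when the last reference to the action is dropped; release the
 * parameter block after an RCU grace period.
 */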
static void tcf_skbmod_cleanup(struct tc_action *a)
{
	struct tcf_skbmod *d = to_skbmod(a);
	struct tcf_skbmod_params *p;

	p = rcu_dereference_protected(d->skbmod_p, 1);
	if (p)
		kfree_rcu(p, rcu);
}

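/* Dump the current configuration back to user space.  tcf_lock is held so
 * the parameter block cannot be swapped out while the attributes are being
 * written.
 */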
static int tcf_skbmod_dump(struct sk_buff *skb, struct tc_action *a,
			   int bind, int ref)
{
	struct tcf_skbmod *d = to_skbmod(a);
	unsigned char *b = skb_tail_pointer(skb);
	struct tcf_skbmod_params *p;
	struct tc_skbmod opt;
	struct tcf_t t;

	/* Zero the whole structure, including any padding, before it is
	 * copied to user space via nla_put() below; a designated
	 * initializer only clears the named members and can leave
	 * uninitialized stack bytes in the padding.
	 */
	memset(&opt, 0, sizeof(opt));
	opt.index   = d->tcf_index;
	opt.refcnt  = refcount_read(&d->tcf_refcnt) - ref;
	opt.bindcnt = atomic_read(&d->tcf_bindcnt) - bind;

	spin_lock_bh(&d->tcf_lock);
	opt.action = d->tcf_action;
	p = rcu_dereference_protected(d->skbmod_p,
				      lockdep_is_held(&d->tcf_lock));
	opt.flags = p->flags;
	if (nla_put(skb, TCA_SKBMOD_PARMS, sizeof(opt), &opt))
		goto nla_put_failure;
	if ((p->flags & SKBMOD_F_DMAC) &&
	    nla_put(skb, TCA_SKBMOD_DMAC, ETH_ALEN, p->eth_dst))
		goto nla_put_failure;
	if ((p->flags & SKBMOD_F_SMAC) &&
	    nla_put(skb, TCA_SKBMOD_SMAC, ETH_ALEN, p->eth_src))
		goto nla_put_failure;
	if ((p->flags & SKBMOD_F_ETYPE) &&
	    nla_put_u16(skb, TCA_SKBMOD_ETYPE, ntohs(p->eth_type)))
		goto nla_put_failure;

	tcf_tm_dump(&t, &d->tcf_tm);
	if (nla_put_64bit(skb, TCA_SKBMOD_TM, sizeof(t), &t, TCA_SKBMOD_PAD))
		goto nla_put_failure;

	spin_unlock_bh(&d->tcf_lock);
	return skb->len;
nla_put_failure:
	spin_unlock_bh(&d->tcf_lock);
	nlmsg_trim(skb, b);
	return -1;
}

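/* Lookup/iteration hooks: both simply delegate to the generic per-netns
 * action infrastructure keyed by skbmod_net_id.
 */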
static int tcf_skbmod_walker(struct net *net, struct sk_buff *skb,
			     struct netlink_callback *cb, int type,
			     const struct tc_action_ops *ops,
			     struct netlink_ext_ack *extack)
{
	struct tc_action_net *tn = net_generic(net, skbmod_net_id);

	return tcf_generic_walker(tn, skb, cb, type, ops, extack);
}

static int tcf_skbmod_search(struct net *net, struct tc_action **a, u32 index)
{
	struct tc_action_net *tn = net_generic(net, skbmod_net_id);

	return tcf_idr_search(tn, a, index);
}

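/* Glue that ties the callbacks above into the TC action core. */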
static struct tc_action_ops act_skbmod_ops = {
	.kind		= "skbmod",
	.id		= TCA_ACT_SKBMOD,
	.owner		= THIS_MODULE,
	.act		= tcf_skbmod_act,
	.dump		= tcf_skbmod_dump,
	.init		= tcf_skbmod_init,
	.cleanup	= tcf_skbmod_cleanup,
	.walk		= tcf_skbmod_walker,
	.lookup		= tcf_skbmod_search,
	.size		= sizeof(struct tcf_skbmod),
};

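/* Per-network-namespace setup and teardown of the action table. */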
static __net_init int skbmod_init_net(struct net *net)
{
	struct tc_action_net *tn = net_generic(net, skbmod_net_id);

	return tc_action_net_init(net, tn, &act_skbmod_ops);
}

static void __net_exit skbmod_exit_net(struct list_head *net_list)
{
	tc_action_net_exit(net_list, skbmod_net_id);
}

static struct pernet_operations skbmod_net_ops = {
	.init = skbmod_init_net,
	.exit_batch = skbmod_exit_net,
	.id   = &skbmod_net_id,
	.size = sizeof(struct tc_action_net),
};

MODULE_AUTHOR("Jamal Hadi Salim, <jhs@mojatatu.com>");
MODULE_DESCRIPTION("SKB data mod-ing");
MODULE_LICENSE("GPL");

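/* Module entry points: register/unregister the action with the TC core,
 * together with its pernet operations.
 */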
static int __init skbmod_init_module(void)
{
	return tcf_register_action(&act_skbmod_ops, &skbmod_net_ops);
}

static void __exit skbmod_cleanup_module(void)
{
	tcf_unregister_action(&act_skbmod_ops, &skbmod_net_ops);
}

module_init(skbmod_init_module);
module_exit(skbmod_cleanup_module);