^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1) // SPDX-License-Identifier: GPL-2.0-or-later
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3) * net/sched/act_api.c Packet action API.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5) * Author: Jamal Hadi Salim
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8) #include <linux/types.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9) #include <linux/kernel.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10) #include <linux/string.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11) #include <linux/errno.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12) #include <linux/slab.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13) #include <linux/skbuff.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14) #include <linux/init.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15) #include <linux/kmod.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 16) #include <linux/err.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 17) #include <linux/module.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 18) #include <net/net_namespace.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 19) #include <net/sock.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 20) #include <net/sch_generic.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 21) #include <net/pkt_cls.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 22) #include <net/act_api.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 23) #include <net/netlink.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 24)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 25) static void tcf_action_goto_chain_exec(const struct tc_action *a,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 26) struct tcf_result *res)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 27) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 28) const struct tcf_chain *chain = rcu_dereference_bh(a->goto_chain);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 29)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 30) res->goto_tp = rcu_dereference_bh(chain->filter_chain);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 31) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 32)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 33) static void tcf_free_cookie_rcu(struct rcu_head *p)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 34) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 35) struct tc_cookie *cookie = container_of(p, struct tc_cookie, rcu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 36)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 37) kfree(cookie->data);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 38) kfree(cookie);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 39) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 40)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 41) static void tcf_set_action_cookie(struct tc_cookie __rcu **old_cookie,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 42) struct tc_cookie *new_cookie)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 43) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 44) struct tc_cookie *old;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 45)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 46) old = xchg((__force struct tc_cookie **)old_cookie, new_cookie);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 47) if (old)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 48) call_rcu(&old->rcu, tcf_free_cookie_rcu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 49) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 50)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 51) int tcf_action_check_ctrlact(int action, struct tcf_proto *tp,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 52) struct tcf_chain **newchain,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 53) struct netlink_ext_ack *extack)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 54) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 55) int opcode = TC_ACT_EXT_OPCODE(action), ret = -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 56) u32 chain_index;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 57)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 58) if (!opcode)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 59) ret = action > TC_ACT_VALUE_MAX ? -EINVAL : 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 60) else if (opcode <= TC_ACT_EXT_OPCODE_MAX || action == TC_ACT_UNSPEC)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 61) ret = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 62) if (ret) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 63) NL_SET_ERR_MSG(extack, "invalid control action");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 64) goto end;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 65) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 66)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 67) if (TC_ACT_EXT_CMP(action, TC_ACT_GOTO_CHAIN)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 68) chain_index = action & TC_ACT_EXT_VAL_MASK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 69) if (!tp || !newchain) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 70) ret = -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 71) NL_SET_ERR_MSG(extack,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 72) "can't goto NULL proto/chain");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 73) goto end;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 74) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 75) *newchain = tcf_chain_get_by_act(tp->chain->block, chain_index);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 76) if (!*newchain) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 77) ret = -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 78) NL_SET_ERR_MSG(extack,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 79) "can't allocate goto_chain");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 80) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 81) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 82) end:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 83) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 84) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 85) EXPORT_SYMBOL(tcf_action_check_ctrlact);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 86)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 87) struct tcf_chain *tcf_action_set_ctrlact(struct tc_action *a, int action,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 88) struct tcf_chain *goto_chain)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 89) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 90) a->tcfa_action = action;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 91) goto_chain = rcu_replace_pointer(a->goto_chain, goto_chain, 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 92) return goto_chain;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 93) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 94) EXPORT_SYMBOL(tcf_action_set_ctrlact);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 95)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 96) /* XXX: For standalone actions, we don't need a RCU grace period either, because
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 97) * actions are always connected to filters and filters are already destroyed in
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 98) * RCU callbacks, so after a RCU grace period actions are already disconnected
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 99) * from filters. Readers later can not find us.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 100) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 101) static void free_tcf(struct tc_action *p)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 102) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 103) struct tcf_chain *chain = rcu_dereference_protected(p->goto_chain, 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 104)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 105) free_percpu(p->cpu_bstats);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 106) free_percpu(p->cpu_bstats_hw);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 107) free_percpu(p->cpu_qstats);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 108)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 109) tcf_set_action_cookie(&p->act_cookie, NULL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 110) if (chain)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 111) tcf_chain_put_by_act(chain);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 112)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 113) kfree(p);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 114) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 115)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 116) static void tcf_action_cleanup(struct tc_action *p)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 117) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 118) if (p->ops->cleanup)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 119) p->ops->cleanup(p);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 120)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 121) gen_kill_estimator(&p->tcfa_rate_est);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 122) free_tcf(p);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 123) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 124)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 125) static int __tcf_action_put(struct tc_action *p, bool bind)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 126) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 127) struct tcf_idrinfo *idrinfo = p->idrinfo;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 128)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 129) if (refcount_dec_and_mutex_lock(&p->tcfa_refcnt, &idrinfo->lock)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 130) if (bind)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 131) atomic_dec(&p->tcfa_bindcnt);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 132) idr_remove(&idrinfo->action_idr, p->tcfa_index);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 133) mutex_unlock(&idrinfo->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 134)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 135) tcf_action_cleanup(p);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 136) return 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 137) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 138)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 139) if (bind)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 140) atomic_dec(&p->tcfa_bindcnt);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 141)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 142) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 143) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 144)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 145) static int __tcf_idr_release(struct tc_action *p, bool bind, bool strict)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 146) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 147) int ret = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 148)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 149) /* Release with strict==1 and bind==0 is only called through act API
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 150) * interface (classifiers always bind). Only case when action with
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 151) * positive reference count and zero bind count can exist is when it was
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 152) * also created with act API (unbinding last classifier will destroy the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 153) * action if it was created by classifier). So only case when bind count
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 154) * can be changed after initial check is when unbound action is
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 155) * destroyed by act API while classifier binds to action with same id
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 156) * concurrently. This result either creation of new action(same behavior
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 157) * as before), or reusing existing action if concurrent process
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 158) * increments reference count before action is deleted. Both scenarios
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 159) * are acceptable.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 160) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 161) if (p) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 162) if (!bind && strict && atomic_read(&p->tcfa_bindcnt) > 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 163) return -EPERM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 164)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 165) if (__tcf_action_put(p, bind))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 166) ret = ACT_P_DELETED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 167) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 168)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 169) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 170) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 171)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 172) int tcf_idr_release(struct tc_action *a, bool bind)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 173) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 174) const struct tc_action_ops *ops = a->ops;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 175) int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 176)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 177) ret = __tcf_idr_release(a, bind, false);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 178) if (ret == ACT_P_DELETED)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 179) module_put(ops->owner);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 180) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 181) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 182) EXPORT_SYMBOL(tcf_idr_release);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 183)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 184) static size_t tcf_action_shared_attrs_size(const struct tc_action *act)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 185) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 186) struct tc_cookie *act_cookie;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 187) u32 cookie_len = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 188)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 189) rcu_read_lock();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 190) act_cookie = rcu_dereference(act->act_cookie);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 191)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 192) if (act_cookie)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 193) cookie_len = nla_total_size(act_cookie->len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 194) rcu_read_unlock();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 195)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 196) return nla_total_size(0) /* action number nested */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 197) + nla_total_size(IFNAMSIZ) /* TCA_ACT_KIND */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 198) + cookie_len /* TCA_ACT_COOKIE */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 199) + nla_total_size(sizeof(struct nla_bitfield32)) /* TCA_ACT_HW_STATS */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 200) + nla_total_size(0) /* TCA_ACT_STATS nested */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 201) + nla_total_size(sizeof(struct nla_bitfield32)) /* TCA_ACT_FLAGS */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 202) /* TCA_STATS_BASIC */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 203) + nla_total_size_64bit(sizeof(struct gnet_stats_basic))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 204) /* TCA_STATS_PKT64 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 205) + nla_total_size_64bit(sizeof(u64))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 206) /* TCA_STATS_QUEUE */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 207) + nla_total_size_64bit(sizeof(struct gnet_stats_queue))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 208) + nla_total_size(0) /* TCA_OPTIONS nested */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 209) + nla_total_size(sizeof(struct tcf_t)); /* TCA_GACT_TM */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 210) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 211)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 212) static size_t tcf_action_full_attrs_size(size_t sz)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 213) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 214) return NLMSG_HDRLEN /* struct nlmsghdr */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 215) + sizeof(struct tcamsg)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 216) + nla_total_size(0) /* TCA_ACT_TAB nested */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 217) + sz;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 218) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 219)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 220) static size_t tcf_action_fill_size(const struct tc_action *act)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 221) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 222) size_t sz = tcf_action_shared_attrs_size(act);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 223)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 224) if (act->ops->get_fill_size)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 225) return act->ops->get_fill_size(act) + sz;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 226) return sz;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 227) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 228)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 229) static int tcf_dump_walker(struct tcf_idrinfo *idrinfo, struct sk_buff *skb,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 230) struct netlink_callback *cb)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 231) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 232) int err = 0, index = -1, s_i = 0, n_i = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 233) u32 act_flags = cb->args[2];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 234) unsigned long jiffy_since = cb->args[3];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 235) struct nlattr *nest;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 236) struct idr *idr = &idrinfo->action_idr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 237) struct tc_action *p;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 238) unsigned long id = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 239) unsigned long tmp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 240)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 241) mutex_lock(&idrinfo->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 242)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 243) s_i = cb->args[0];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 244)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 245) idr_for_each_entry_ul(idr, p, tmp, id) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 246) index++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 247) if (index < s_i)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 248) continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 249) if (IS_ERR(p))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 250) continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 251)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 252) if (jiffy_since &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 253) time_after(jiffy_since,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 254) (unsigned long)p->tcfa_tm.lastuse))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 255) continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 256)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 257) nest = nla_nest_start_noflag(skb, n_i);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 258) if (!nest) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 259) index--;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 260) goto nla_put_failure;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 261) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 262) err = tcf_action_dump_1(skb, p, 0, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 263) if (err < 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 264) index--;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 265) nlmsg_trim(skb, nest);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 266) goto done;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 267) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 268) nla_nest_end(skb, nest);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 269) n_i++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 270) if (!(act_flags & TCA_FLAG_LARGE_DUMP_ON) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 271) n_i >= TCA_ACT_MAX_PRIO)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 272) goto done;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 273) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 274) done:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 275) if (index >= 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 276) cb->args[0] = index + 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 277)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 278) mutex_unlock(&idrinfo->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 279) if (n_i) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 280) if (act_flags & TCA_FLAG_LARGE_DUMP_ON)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 281) cb->args[1] = n_i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 282) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 283) return n_i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 284)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 285) nla_put_failure:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 286) nla_nest_cancel(skb, nest);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 287) goto done;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 288) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 289)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 290) static int tcf_idr_release_unsafe(struct tc_action *p)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 291) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 292) if (atomic_read(&p->tcfa_bindcnt) > 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 293) return -EPERM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 294)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 295) if (refcount_dec_and_test(&p->tcfa_refcnt)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 296) idr_remove(&p->idrinfo->action_idr, p->tcfa_index);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 297) tcf_action_cleanup(p);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 298) return ACT_P_DELETED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 299) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 300)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 301) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 302) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 303)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 304) static int tcf_del_walker(struct tcf_idrinfo *idrinfo, struct sk_buff *skb,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 305) const struct tc_action_ops *ops)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 306) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 307) struct nlattr *nest;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 308) int n_i = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 309) int ret = -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 310) struct idr *idr = &idrinfo->action_idr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 311) struct tc_action *p;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 312) unsigned long id = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 313) unsigned long tmp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 314)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 315) nest = nla_nest_start_noflag(skb, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 316) if (nest == NULL)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 317) goto nla_put_failure;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 318) if (nla_put_string(skb, TCA_KIND, ops->kind))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 319) goto nla_put_failure;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 320)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 321) mutex_lock(&idrinfo->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 322) idr_for_each_entry_ul(idr, p, tmp, id) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 323) if (IS_ERR(p))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 324) continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 325) ret = tcf_idr_release_unsafe(p);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 326) if (ret == ACT_P_DELETED) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 327) module_put(ops->owner);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 328) n_i++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 329) } else if (ret < 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 330) mutex_unlock(&idrinfo->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 331) goto nla_put_failure;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 332) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 333) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 334) mutex_unlock(&idrinfo->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 335)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 336) ret = nla_put_u32(skb, TCA_FCNT, n_i);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 337) if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 338) goto nla_put_failure;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 339) nla_nest_end(skb, nest);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 340)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 341) return n_i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 342) nla_put_failure:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 343) nla_nest_cancel(skb, nest);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 344) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 345) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 346)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 347) int tcf_generic_walker(struct tc_action_net *tn, struct sk_buff *skb,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 348) struct netlink_callback *cb, int type,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 349) const struct tc_action_ops *ops,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 350) struct netlink_ext_ack *extack)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 351) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 352) struct tcf_idrinfo *idrinfo = tn->idrinfo;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 353)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 354) if (type == RTM_DELACTION) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 355) return tcf_del_walker(idrinfo, skb, ops);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 356) } else if (type == RTM_GETACTION) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 357) return tcf_dump_walker(idrinfo, skb, cb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 358) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 359) WARN(1, "tcf_generic_walker: unknown command %d\n", type);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 360) NL_SET_ERR_MSG(extack, "tcf_generic_walker: unknown command");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 361) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 362) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 363) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 364) EXPORT_SYMBOL(tcf_generic_walker);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 365)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 366) int tcf_idr_search(struct tc_action_net *tn, struct tc_action **a, u32 index)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 367) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 368) struct tcf_idrinfo *idrinfo = tn->idrinfo;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 369) struct tc_action *p;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 370)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 371) mutex_lock(&idrinfo->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 372) p = idr_find(&idrinfo->action_idr, index);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 373) if (IS_ERR(p))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 374) p = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 375) else if (p)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 376) refcount_inc(&p->tcfa_refcnt);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 377) mutex_unlock(&idrinfo->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 378)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 379) if (p) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 380) *a = p;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 381) return true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 382) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 383) return false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 384) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 385) EXPORT_SYMBOL(tcf_idr_search);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 386)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 387) static int tcf_idr_delete_index(struct tcf_idrinfo *idrinfo, u32 index)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 388) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 389) struct tc_action *p;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 390) int ret = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 391)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 392) mutex_lock(&idrinfo->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 393) p = idr_find(&idrinfo->action_idr, index);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 394) if (!p) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 395) mutex_unlock(&idrinfo->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 396) return -ENOENT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 397) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 398)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 399) if (!atomic_read(&p->tcfa_bindcnt)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 400) if (refcount_dec_and_test(&p->tcfa_refcnt)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 401) struct module *owner = p->ops->owner;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 402)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 403) WARN_ON(p != idr_remove(&idrinfo->action_idr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 404) p->tcfa_index));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 405) mutex_unlock(&idrinfo->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 406)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 407) tcf_action_cleanup(p);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 408) module_put(owner);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 409) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 410) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 411) ret = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 412) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 413) ret = -EPERM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 414) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 415)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 416) mutex_unlock(&idrinfo->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 417) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 418) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 419)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 420) int tcf_idr_create(struct tc_action_net *tn, u32 index, struct nlattr *est,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 421) struct tc_action **a, const struct tc_action_ops *ops,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 422) int bind, bool cpustats, u32 flags)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 423) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 424) struct tc_action *p = kzalloc(ops->size, GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 425) struct tcf_idrinfo *idrinfo = tn->idrinfo;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 426) int err = -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 427)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 428) if (unlikely(!p))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 429) return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 430) refcount_set(&p->tcfa_refcnt, 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 431) if (bind)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 432) atomic_set(&p->tcfa_bindcnt, 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 433)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 434) if (cpustats) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 435) p->cpu_bstats = netdev_alloc_pcpu_stats(struct gnet_stats_basic_cpu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 436) if (!p->cpu_bstats)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 437) goto err1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 438) p->cpu_bstats_hw = netdev_alloc_pcpu_stats(struct gnet_stats_basic_cpu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 439) if (!p->cpu_bstats_hw)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 440) goto err2;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 441) p->cpu_qstats = alloc_percpu(struct gnet_stats_queue);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 442) if (!p->cpu_qstats)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 443) goto err3;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 444) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 445) spin_lock_init(&p->tcfa_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 446) p->tcfa_index = index;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 447) p->tcfa_tm.install = jiffies;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 448) p->tcfa_tm.lastuse = jiffies;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 449) p->tcfa_tm.firstuse = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 450) p->tcfa_flags = flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 451) if (est) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 452) err = gen_new_estimator(&p->tcfa_bstats, p->cpu_bstats,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 453) &p->tcfa_rate_est,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 454) &p->tcfa_lock, NULL, est);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 455) if (err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 456) goto err4;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 457) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 458)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 459) p->idrinfo = idrinfo;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 460) __module_get(ops->owner);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 461) p->ops = ops;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 462) *a = p;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 463) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 464) err4:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 465) free_percpu(p->cpu_qstats);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 466) err3:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 467) free_percpu(p->cpu_bstats_hw);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 468) err2:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 469) free_percpu(p->cpu_bstats);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 470) err1:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 471) kfree(p);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 472) return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 473) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 474) EXPORT_SYMBOL(tcf_idr_create);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 475)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 476) int tcf_idr_create_from_flags(struct tc_action_net *tn, u32 index,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 477) struct nlattr *est, struct tc_action **a,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 478) const struct tc_action_ops *ops, int bind,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 479) u32 flags)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 480) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 481) /* Set cpustats according to actions flags. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 482) return tcf_idr_create(tn, index, est, a, ops, bind,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 483) !(flags & TCA_ACT_FLAGS_NO_PERCPU_STATS), flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 484) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 485) EXPORT_SYMBOL(tcf_idr_create_from_flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 486)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 487) /* Cleanup idr index that was allocated but not initialized. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 488)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 489) void tcf_idr_cleanup(struct tc_action_net *tn, u32 index)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 490) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 491) struct tcf_idrinfo *idrinfo = tn->idrinfo;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 492)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 493) mutex_lock(&idrinfo->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 494) /* Remove ERR_PTR(-EBUSY) allocated by tcf_idr_check_alloc */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 495) WARN_ON(!IS_ERR(idr_remove(&idrinfo->action_idr, index)));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 496) mutex_unlock(&idrinfo->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 497) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 498) EXPORT_SYMBOL(tcf_idr_cleanup);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 499)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 500) /* Check if action with specified index exists. If actions is found, increments
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 501) * its reference and bind counters, and return 1. Otherwise insert temporary
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 502) * error pointer (to prevent concurrent users from inserting actions with same
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 503) * index) and return 0.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 504) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 505)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 506) int tcf_idr_check_alloc(struct tc_action_net *tn, u32 *index,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 507) struct tc_action **a, int bind)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 508) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 509) struct tcf_idrinfo *idrinfo = tn->idrinfo;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 510) struct tc_action *p;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 511) int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 512)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 513) again:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 514) mutex_lock(&idrinfo->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 515) if (*index) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 516) p = idr_find(&idrinfo->action_idr, *index);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 517) if (IS_ERR(p)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 518) /* This means that another process allocated
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 519) * index but did not assign the pointer yet.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 520) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 521) mutex_unlock(&idrinfo->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 522) goto again;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 523) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 524)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 525) if (p) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 526) refcount_inc(&p->tcfa_refcnt);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 527) if (bind)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 528) atomic_inc(&p->tcfa_bindcnt);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 529) *a = p;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 530) ret = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 531) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 532) *a = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 533) ret = idr_alloc_u32(&idrinfo->action_idr, NULL, index,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 534) *index, GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 535) if (!ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 536) idr_replace(&idrinfo->action_idr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 537) ERR_PTR(-EBUSY), *index);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 538) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 539) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 540) *index = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 541) *a = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 542) ret = idr_alloc_u32(&idrinfo->action_idr, NULL, index,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 543) UINT_MAX, GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 544) if (!ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 545) idr_replace(&idrinfo->action_idr, ERR_PTR(-EBUSY),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 546) *index);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 547) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 548) mutex_unlock(&idrinfo->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 549) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 550) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 551) EXPORT_SYMBOL(tcf_idr_check_alloc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 552)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 553) void tcf_idrinfo_destroy(const struct tc_action_ops *ops,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 554) struct tcf_idrinfo *idrinfo)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 555) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 556) struct idr *idr = &idrinfo->action_idr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 557) struct tc_action *p;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 558) int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 559) unsigned long id = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 560) unsigned long tmp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 561)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 562) idr_for_each_entry_ul(idr, p, tmp, id) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 563) ret = __tcf_idr_release(p, false, true);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 564) if (ret == ACT_P_DELETED)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 565) module_put(ops->owner);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 566) else if (ret < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 567) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 568) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 569) idr_destroy(&idrinfo->action_idr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 570) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 571) EXPORT_SYMBOL(tcf_idrinfo_destroy);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 572)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 573) static LIST_HEAD(act_base);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 574) static DEFINE_RWLOCK(act_mod_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 575)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 576) int tcf_register_action(struct tc_action_ops *act,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 577) struct pernet_operations *ops)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 578) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 579) struct tc_action_ops *a;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 580) int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 581)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 582) if (!act->act || !act->dump || !act->init || !act->walk || !act->lookup)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 583) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 584)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 585) /* We have to register pernet ops before making the action ops visible,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 586) * otherwise tcf_action_init_1() could get a partially initialized
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 587) * netns.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 588) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 589) ret = register_pernet_subsys(ops);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 590) if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 591) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 592)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 593) write_lock(&act_mod_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 594) list_for_each_entry(a, &act_base, head) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 595) if (act->id == a->id || (strcmp(act->kind, a->kind) == 0)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 596) write_unlock(&act_mod_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 597) unregister_pernet_subsys(ops);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 598) return -EEXIST;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 599) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 600) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 601) list_add_tail(&act->head, &act_base);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 602) write_unlock(&act_mod_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 603)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 604) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 605) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 606) EXPORT_SYMBOL(tcf_register_action);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 607)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 608) int tcf_unregister_action(struct tc_action_ops *act,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 609) struct pernet_operations *ops)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 610) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 611) struct tc_action_ops *a;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 612) int err = -ENOENT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 613)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 614) write_lock(&act_mod_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 615) list_for_each_entry(a, &act_base, head) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 616) if (a == act) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 617) list_del(&act->head);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 618) err = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 619) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 620) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 621) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 622) write_unlock(&act_mod_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 623) if (!err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 624) unregister_pernet_subsys(ops);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 625) return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 626) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 627) EXPORT_SYMBOL(tcf_unregister_action);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 628)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 629) /* lookup by name */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 630) static struct tc_action_ops *tc_lookup_action_n(char *kind)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 631) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 632) struct tc_action_ops *a, *res = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 633)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 634) if (kind) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 635) read_lock(&act_mod_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 636) list_for_each_entry(a, &act_base, head) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 637) if (strcmp(kind, a->kind) == 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 638) if (try_module_get(a->owner))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 639) res = a;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 640) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 641) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 642) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 643) read_unlock(&act_mod_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 644) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 645) return res;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 646) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 647)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 648) /* lookup by nlattr */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 649) static struct tc_action_ops *tc_lookup_action(struct nlattr *kind)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 650) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 651) struct tc_action_ops *a, *res = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 652)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 653) if (kind) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 654) read_lock(&act_mod_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 655) list_for_each_entry(a, &act_base, head) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 656) if (nla_strcmp(kind, a->kind) == 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 657) if (try_module_get(a->owner))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 658) res = a;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 659) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 660) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 661) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 662) read_unlock(&act_mod_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 663) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 664) return res;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 665) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 666)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 667) /*TCA_ACT_MAX_PRIO is 32, there count upto 32 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 668) #define TCA_ACT_MAX_PRIO_MASK 0x1FF
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 669) int tcf_action_exec(struct sk_buff *skb, struct tc_action **actions,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 670) int nr_actions, struct tcf_result *res)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 671) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 672) u32 jmp_prgcnt = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 673) u32 jmp_ttl = TCA_ACT_MAX_PRIO; /*matches actions per filter */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 674) int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 675) int ret = TC_ACT_OK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 676)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 677) if (skb_skip_tc_classify(skb))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 678) return TC_ACT_OK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 679)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 680) restart_act_graph:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 681) for (i = 0; i < nr_actions; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 682) const struct tc_action *a = actions[i];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 683) int repeat_ttl;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 684)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 685) if (jmp_prgcnt > 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 686) jmp_prgcnt -= 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 687) continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 688) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 689)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 690) repeat_ttl = 32;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 691) repeat:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 692) ret = a->ops->act(skb, a, res);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 693)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 694) if (unlikely(ret == TC_ACT_REPEAT)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 695) if (--repeat_ttl != 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 696) goto repeat;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 697) /* suspicious opcode, stop pipeline */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 698) net_warn_ratelimited("TC_ACT_REPEAT abuse ?\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 699) return TC_ACT_OK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 700) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 701)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 702) if (TC_ACT_EXT_CMP(ret, TC_ACT_JUMP)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 703) jmp_prgcnt = ret & TCA_ACT_MAX_PRIO_MASK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 704) if (!jmp_prgcnt || (jmp_prgcnt > nr_actions)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 705) /* faulty opcode, stop pipeline */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 706) return TC_ACT_OK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 707) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 708) jmp_ttl -= 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 709) if (jmp_ttl > 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 710) goto restart_act_graph;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 711) else /* faulty graph, stop pipeline */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 712) return TC_ACT_OK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 713) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 714) } else if (TC_ACT_EXT_CMP(ret, TC_ACT_GOTO_CHAIN)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 715) if (unlikely(!rcu_access_pointer(a->goto_chain))) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 716) net_warn_ratelimited("can't go to NULL chain!\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 717) return TC_ACT_SHOT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 718) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 719) tcf_action_goto_chain_exec(a, res);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 720) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 721)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 722) if (ret != TC_ACT_PIPE)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 723) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 724) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 725)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 726) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 727) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 728) EXPORT_SYMBOL(tcf_action_exec);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 729)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 730) int tcf_action_destroy(struct tc_action *actions[], int bind)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 731) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 732) const struct tc_action_ops *ops;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 733) struct tc_action *a;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 734) int ret = 0, i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 735)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 736) for (i = 0; i < TCA_ACT_MAX_PRIO && actions[i]; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 737) a = actions[i];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 738) actions[i] = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 739) ops = a->ops;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 740) ret = __tcf_idr_release(a, bind, true);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 741) if (ret == ACT_P_DELETED)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 742) module_put(ops->owner);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 743) else if (ret < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 744) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 745) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 746) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 747) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 748)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 749) static int tcf_action_put(struct tc_action *p)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 750) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 751) return __tcf_action_put(p, false);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 752) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 753)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 754) /* Put all actions in this array, skip those NULL's. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 755) static void tcf_action_put_many(struct tc_action *actions[])
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 756) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 757) int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 758)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 759) for (i = 0; i < TCA_ACT_MAX_PRIO; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 760) struct tc_action *a = actions[i];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 761) const struct tc_action_ops *ops;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 762)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 763) if (!a)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 764) continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 765) ops = a->ops;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 766) if (tcf_action_put(a))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 767) module_put(ops->owner);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 768) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 769) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 770)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 771) int
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 772) tcf_action_dump_old(struct sk_buff *skb, struct tc_action *a, int bind, int ref)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 773) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 774) return a->ops->dump(skb, a, bind, ref);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 775) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 776)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 777) static int
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 778) tcf_action_dump_terse(struct sk_buff *skb, struct tc_action *a)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 779) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 780) unsigned char *b = skb_tail_pointer(skb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 781) struct tc_cookie *cookie;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 782)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 783) if (nla_put_string(skb, TCA_KIND, a->ops->kind))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 784) goto nla_put_failure;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 785) if (tcf_action_copy_stats(skb, a, 0))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 786) goto nla_put_failure;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 787)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 788) rcu_read_lock();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 789) cookie = rcu_dereference(a->act_cookie);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 790) if (cookie) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 791) if (nla_put(skb, TCA_ACT_COOKIE, cookie->len, cookie->data)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 792) rcu_read_unlock();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 793) goto nla_put_failure;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 794) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 795) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 796) rcu_read_unlock();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 797)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 798) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 799)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 800) nla_put_failure:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 801) nlmsg_trim(skb, b);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 802) return -1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 803) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 804)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 805) int
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 806) tcf_action_dump_1(struct sk_buff *skb, struct tc_action *a, int bind, int ref)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 807) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 808) int err = -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 809) unsigned char *b = skb_tail_pointer(skb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 810) struct nlattr *nest;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 811)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 812) if (tcf_action_dump_terse(skb, a))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 813) goto nla_put_failure;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 814)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 815) if (a->hw_stats != TCA_ACT_HW_STATS_ANY &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 816) nla_put_bitfield32(skb, TCA_ACT_HW_STATS,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 817) a->hw_stats, TCA_ACT_HW_STATS_ANY))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 818) goto nla_put_failure;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 819)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 820) if (a->used_hw_stats_valid &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 821) nla_put_bitfield32(skb, TCA_ACT_USED_HW_STATS,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 822) a->used_hw_stats, TCA_ACT_HW_STATS_ANY))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 823) goto nla_put_failure;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 824)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 825) if (a->tcfa_flags &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 826) nla_put_bitfield32(skb, TCA_ACT_FLAGS,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 827) a->tcfa_flags, a->tcfa_flags))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 828) goto nla_put_failure;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 829)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 830) nest = nla_nest_start_noflag(skb, TCA_OPTIONS);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 831) if (nest == NULL)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 832) goto nla_put_failure;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 833) err = tcf_action_dump_old(skb, a, bind, ref);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 834) if (err > 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 835) nla_nest_end(skb, nest);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 836) return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 837) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 838)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 839) nla_put_failure:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 840) nlmsg_trim(skb, b);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 841) return -1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 842) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 843) EXPORT_SYMBOL(tcf_action_dump_1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 844)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 845) int tcf_action_dump(struct sk_buff *skb, struct tc_action *actions[],
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 846) int bind, int ref, bool terse)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 847) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 848) struct tc_action *a;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 849) int err = -EINVAL, i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 850) struct nlattr *nest;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 851)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 852) for (i = 0; i < TCA_ACT_MAX_PRIO && actions[i]; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 853) a = actions[i];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 854) nest = nla_nest_start_noflag(skb, i + 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 855) if (nest == NULL)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 856) goto nla_put_failure;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 857) err = terse ? tcf_action_dump_terse(skb, a) :
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 858) tcf_action_dump_1(skb, a, bind, ref);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 859) if (err < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 860) goto errout;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 861) nla_nest_end(skb, nest);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 862) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 863)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 864) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 865)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 866) nla_put_failure:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 867) err = -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 868) errout:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 869) nla_nest_cancel(skb, nest);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 870) return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 871) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 872)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 873) static struct tc_cookie *nla_memdup_cookie(struct nlattr **tb)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 874) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 875) struct tc_cookie *c = kzalloc(sizeof(*c), GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 876) if (!c)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 877) return NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 878)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 879) c->data = nla_memdup(tb[TCA_ACT_COOKIE], GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 880) if (!c->data) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 881) kfree(c);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 882) return NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 883) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 884) c->len = nla_len(tb[TCA_ACT_COOKIE]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 885)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 886) return c;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 887) }
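
/*
 * Ownership note: the cookie returned by nla_memdup_cookie() above belongs
 * to the caller until it has been attached to an action with
 * tcf_set_action_cookie().  If anything fails before that hand-over, both
 * cookie->data and the cookie itself must be freed, as the err_out path of
 * tcf_action_init_1() below does.
 */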
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 888)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 889) static u8 tcf_action_hw_stats_get(struct nlattr *hw_stats_attr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 890) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 891) struct nla_bitfield32 hw_stats_bf;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 892)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 893) /* If the user did not pass the attr, that means they do
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 894) * not care about the type. Return "any" in that case,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 895) * which selects all supported types.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 896) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 897) if (!hw_stats_attr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 898) return TCA_ACT_HW_STATS_ANY;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 899) hw_stats_bf = nla_get_bitfield32(hw_stats_attr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 900) return hw_stats_bf.value;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 901) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 902)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 903) static const struct nla_policy tcf_action_policy[TCA_ACT_MAX + 1] = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 904) [TCA_ACT_KIND] = { .type = NLA_STRING },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 905) [TCA_ACT_INDEX] = { .type = NLA_U32 },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 906) [TCA_ACT_COOKIE] = { .type = NLA_BINARY,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 907) .len = TC_COOKIE_MAX_SIZE },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 908) [TCA_ACT_OPTIONS] = { .type = NLA_NESTED },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 909) [TCA_ACT_FLAGS] = NLA_POLICY_BITFIELD32(TCA_ACT_FLAGS_NO_PERCPU_STATS),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 910) [TCA_ACT_HW_STATS] = NLA_POLICY_BITFIELD32(TCA_ACT_HW_STATS_ANY),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 911) };
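
/*
 * Illustrative sketch, not part of the original file: how a single action
 * attribute nest is validated against tcf_action_policy above and how a
 * missing TCA_ACT_HW_STATS attribute degrades to "any".  The function name
 * is hypothetical; tcf_action_init_1() below is the real consumer of this
 * pattern.  Returns the resolved hw_stats mask or a negative errno.
 */
static int __maybe_unused tcf_action_hw_stats_parse_example(struct nlattr *nla,
							     struct netlink_ext_ack *extack)
{
	struct nlattr *tb[TCA_ACT_MAX + 1];
	int err;

	err = nla_parse_nested_deprecated(tb, TCA_ACT_MAX, nla,
					  tcf_action_policy, extack);
	if (err < 0)
		return err;

	/* An absent attribute means "all supported hw stats types". */
	return tcf_action_hw_stats_get(tb[TCA_ACT_HW_STATS]);
}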
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 912)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 913) void tcf_idr_insert_many(struct tc_action *actions[])
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 914) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 915) int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 916)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 917) for (i = 0; i < TCA_ACT_MAX_PRIO; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 918) struct tc_action *a = actions[i];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 919) struct tcf_idrinfo *idrinfo;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 920)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 921) if (!a)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 922) continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 923) idrinfo = a->idrinfo;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 924) mutex_lock(&idrinfo->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 925) /* Replace the ERR_PTR(-EBUSY) placeholder allocated by tcf_idr_check_alloc()
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 926) * if the action was just created; otherwise this is a no-op.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 927) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 928) idr_replace(&idrinfo->action_idr, a, a->tcfa_index);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 929) mutex_unlock(&idrinfo->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 930) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 931) }
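
/*
 * Ordering note: tcf_idr_insert_many() is the final, all-or-nothing commit
 * step.  Actions are first set up one by one (newly created ones leave an
 * ERR_PTR(-EBUSY) placeholder in their IDR, see the comment above), and only
 * after the whole batch has been built does tcf_action_init() below commit
 * them all in a single pass.
 */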
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 932)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 933) struct tc_action_ops *tc_action_load_ops(char *name, struct nlattr *nla,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 934) bool rtnl_held,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 935) struct netlink_ext_ack *extack)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 936) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 937) struct nlattr *tb[TCA_ACT_MAX + 1];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 938) struct tc_action_ops *a_o;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 939) char act_name[IFNAMSIZ];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 940) struct nlattr *kind;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 941) int err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 942)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 943) if (name == NULL) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 944) err = nla_parse_nested_deprecated(tb, TCA_ACT_MAX, nla,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 945) tcf_action_policy, extack);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 946) if (err < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 947) return ERR_PTR(err);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 948) err = -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 949) kind = tb[TCA_ACT_KIND];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 950) if (!kind) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 951) NL_SET_ERR_MSG(extack, "TC action kind must be specified");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 952) return ERR_PTR(err);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 953) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 954) if (nla_strlcpy(act_name, kind, IFNAMSIZ) >= IFNAMSIZ) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 955) NL_SET_ERR_MSG(extack, "TC action name too long");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 956) return ERR_PTR(err);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 957) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 958) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 959) if (strlcpy(act_name, name, IFNAMSIZ) >= IFNAMSIZ) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 960) NL_SET_ERR_MSG(extack, "TC action name too long");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 961) return ERR_PTR(-EINVAL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 962) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 963) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 964)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 965) a_o = tc_lookup_action_n(act_name);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 966) if (a_o == NULL) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 967) #ifdef CONFIG_MODULES
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 968) if (rtnl_held)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 969) rtnl_unlock();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 970) request_module("act_%s", act_name);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 971) if (rtnl_held)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 972) rtnl_lock();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 973)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 974) a_o = tc_lookup_action_n(act_name);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 975)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 976) /* We dropped the RTNL semaphore in order to
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 977) * perform the module load. So, even if we
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 978) * succeeded in loading the module, we have to
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 979) * tell the caller to replay the request. We
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 980) * indicate this using -EAGAIN.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 981) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 982) if (a_o != NULL) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 983) module_put(a_o->owner);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 984) return ERR_PTR(-EAGAIN);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 985) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 986) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 987) NL_SET_ERR_MSG(extack, "Failed to load TC action module");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 988) return ERR_PTR(-ENOENT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 989) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 990)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 991) return a_o;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 992) }
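
/*
 * Usage note: on success tc_action_load_ops() returns ops with a module
 * reference held; the caller drops it with module_put() once the ops are no
 * longer needed (see the err_mod unwinding in tcf_action_init() below).
 * -EAGAIN means the action module was loaded after the RTNL lock had been
 * dropped, so the whole request must be replayed, as the bounded retry loop
 * in tcf_action_add() does.
 */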
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 993)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 994) struct tc_action *tcf_action_init_1(struct net *net, struct tcf_proto *tp,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 995) struct nlattr *nla, struct nlattr *est,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 996) char *name, int ovr, int bind,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 997) struct tc_action_ops *a_o, int *init_res,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 998) bool rtnl_held,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 999) struct netlink_ext_ack *extack)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1000) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1001) struct nla_bitfield32 flags = { 0, 0 };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1002) u8 hw_stats = TCA_ACT_HW_STATS_ANY;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1003) struct nlattr *tb[TCA_ACT_MAX + 1];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1004) struct tc_cookie *cookie = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1005) struct tc_action *a;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1006) int err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1007)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1008) /* Backward compatibility for policer: with a "name", options come directly in 'nla'. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1009) if (name == NULL) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1010) err = nla_parse_nested_deprecated(tb, TCA_ACT_MAX, nla,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1011) tcf_action_policy, extack);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1012) if (err < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1013) return ERR_PTR(err);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1014) if (tb[TCA_ACT_COOKIE]) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1015) cookie = nla_memdup_cookie(tb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1016) if (!cookie) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1017) NL_SET_ERR_MSG(extack, "No memory to generate TC cookie");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1018) err = -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1019) goto err_out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1020) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1021) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1022) hw_stats = tcf_action_hw_stats_get(tb[TCA_ACT_HW_STATS]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1023) if (tb[TCA_ACT_FLAGS])
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1024) flags = nla_get_bitfield32(tb[TCA_ACT_FLAGS]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1025)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1026) err = a_o->init(net, tb[TCA_ACT_OPTIONS], est, &a, ovr, bind,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1027) rtnl_held, tp, flags.value, extack);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1028) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1029) err = a_o->init(net, nla, est, &a, ovr, bind, rtnl_held,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1030) tp, flags.value, extack);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1031) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1032) if (err < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1033) goto err_out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1034) *init_res = err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1035)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1036) if (!name && tb[TCA_ACT_COOKIE])
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1037) tcf_set_action_cookie(&a->act_cookie, cookie);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1038)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1039) if (!name)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1040) a->hw_stats = hw_stats;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1041)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1042) return a;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1043)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1044) err_out:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1045) if (cookie) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1046) kfree(cookie->data);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1047) kfree(cookie);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1048) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1049) return ERR_PTR(err);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1050) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1051)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1052) /* Returns the number of initialized actions or a negative error. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1053)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1054) int tcf_action_init(struct net *net, struct tcf_proto *tp, struct nlattr *nla,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1055) struct nlattr *est, char *name, int ovr, int bind,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1056) struct tc_action *actions[], int init_res[], size_t *attr_size,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1057) bool rtnl_held, struct netlink_ext_ack *extack)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1058) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1059) struct tc_action_ops *ops[TCA_ACT_MAX_PRIO] = {};
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1060) struct nlattr *tb[TCA_ACT_MAX_PRIO + 1];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1061) struct tc_action *act;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1062) size_t sz = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1063) int err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1064) int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1065)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1066) err = nla_parse_nested_deprecated(tb, TCA_ACT_MAX_PRIO, nla, NULL,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1067) extack);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1068) if (err < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1069) return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1070)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1071) for (i = 1; i <= TCA_ACT_MAX_PRIO && tb[i]; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1072) struct tc_action_ops *a_o;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1073)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1074) a_o = tc_action_load_ops(name, tb[i], rtnl_held, extack);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1075) if (IS_ERR(a_o)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1076) err = PTR_ERR(a_o);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1077) goto err_mod;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1078) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1079) ops[i - 1] = a_o;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1080) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1081)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1082) for (i = 1; i <= TCA_ACT_MAX_PRIO && tb[i]; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1083) act = tcf_action_init_1(net, tp, tb[i], est, name, ovr, bind,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1084) ops[i - 1], &init_res[i - 1], rtnl_held,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1085) extack);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1086) if (IS_ERR(act)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1087) err = PTR_ERR(act);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1088) goto err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1089) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1090) sz += tcf_action_fill_size(act);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1091) /* Start from index 0 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1092) actions[i - 1] = act;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1093) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1094)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1095) /* We have to commit them all together, because if any error happened in
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1096) * between, we could not handle the failure gracefully.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1097) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1098) tcf_idr_insert_many(actions);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1099)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1100) *attr_size = tcf_action_full_attrs_size(sz);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1101) err = i - 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1102) goto err_mod;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1103)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1104) err:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1105) tcf_action_destroy(actions, bind);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1106) err_mod:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1107) for (i = 0; i < TCA_ACT_MAX_PRIO; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1108) if (ops[i])
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1109) module_put(ops[i]->owner);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1110) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1111) return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1112) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1113)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1114) void tcf_action_update_stats(struct tc_action *a, u64 bytes, u64 packets,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1115) u64 drops, bool hw)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1116) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1117) if (a->cpu_bstats) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1118) _bstats_cpu_update(this_cpu_ptr(a->cpu_bstats), bytes, packets);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1119)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1120) this_cpu_ptr(a->cpu_qstats)->drops += drops;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1121)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1122) if (hw)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1123) _bstats_cpu_update(this_cpu_ptr(a->cpu_bstats_hw),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1124) bytes, packets);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1125) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1126) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1127)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1128) _bstats_update(&a->tcfa_bstats, bytes, packets);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1129) a->tcfa_qstats.drops += drops;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1130) if (hw)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1131) _bstats_update(&a->tcfa_bstats_hw, bytes, packets);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1132) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1133) EXPORT_SYMBOL(tcf_action_update_stats);
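
/*
 * Illustrative sketch, not part of the original file: how an action
 * implementation would typically account one software-processed packet via
 * tcf_action_update_stats().  The function name is hypothetical; real
 * actions call the helper from their act() handler with the skb they just
 * processed and pass hw == true only for counters reported by hardware.
 */
static void __maybe_unused tcf_action_account_example(struct tc_action *a,
						       const struct sk_buff *skb)
{
	/* one packet, skb->len bytes, no drops, software stats only */
	tcf_action_update_stats(a, skb->len, 1, 0, false);
}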
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1134)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1135) int tcf_action_copy_stats(struct sk_buff *skb, struct tc_action *p,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1136) int compat_mode)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1137) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1138) int err = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1139) struct gnet_dump d;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1140)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1141) if (p == NULL)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1142) goto errout;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1143)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1144) /* When compat_mode is true, the call is expected to add the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1145) * additional backward-compatibility statistics TLVs.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1146) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1147) if (compat_mode) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1148) if (p->type == TCA_OLD_COMPAT)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1149) err = gnet_stats_start_copy_compat(skb, 0,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1150) TCA_STATS,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1151) TCA_XSTATS,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1152) &p->tcfa_lock, &d,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1153) TCA_PAD);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1154) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1155) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1156) } else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1157) err = gnet_stats_start_copy(skb, TCA_ACT_STATS,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1158) &p->tcfa_lock, &d, TCA_ACT_PAD);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1159)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1160) if (err < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1161) goto errout;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1162)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1163) if (gnet_stats_copy_basic(NULL, &d, p->cpu_bstats, &p->tcfa_bstats) < 0 ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1164) gnet_stats_copy_basic_hw(NULL, &d, p->cpu_bstats_hw,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1165) &p->tcfa_bstats_hw) < 0 ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1166) gnet_stats_copy_rate_est(&d, &p->tcfa_rate_est) < 0 ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1167) gnet_stats_copy_queue(&d, p->cpu_qstats,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1168) &p->tcfa_qstats,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1169) p->tcfa_qstats.qlen) < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1170) goto errout;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1171)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1172) if (gnet_stats_finish_copy(&d) < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1173) goto errout;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1174)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1175) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1176)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1177) errout:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1178) return -1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1179) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1180)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1181) static int tca_get_fill(struct sk_buff *skb, struct tc_action *actions[],
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1182) u32 portid, u32 seq, u16 flags, int event, int bind,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1183) int ref)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1184) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1185) struct tcamsg *t;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1186) struct nlmsghdr *nlh;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1187) unsigned char *b = skb_tail_pointer(skb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1188) struct nlattr *nest;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1189)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1190) nlh = nlmsg_put(skb, portid, seq, event, sizeof(*t), flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1191) if (!nlh)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1192) goto out_nlmsg_trim;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1193) t = nlmsg_data(nlh);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1194) t->tca_family = AF_UNSPEC;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1195) t->tca__pad1 = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1196) t->tca__pad2 = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1197)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1198) nest = nla_nest_start_noflag(skb, TCA_ACT_TAB);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1199) if (!nest)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1200) goto out_nlmsg_trim;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1201)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1202) if (tcf_action_dump(skb, actions, bind, ref, false) < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1203) goto out_nlmsg_trim;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1204)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1205) nla_nest_end(skb, nest);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1206)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1207) nlh->nlmsg_len = skb_tail_pointer(skb) - b;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1208) return skb->len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1209)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1210) out_nlmsg_trim:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1211) nlmsg_trim(skb, b);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1212) return -1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1213) }
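
/*
 * Message layout produced by tca_get_fill() above: a struct tcamsg header
 * followed by a TCA_ACT_TAB nest, which in turn carries one numbered nest
 * per action (1..TCA_ACT_MAX_PRIO) as written by tcf_action_dump().
 */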
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1214)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1215) static int
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1216) tcf_get_notify(struct net *net, u32 portid, struct nlmsghdr *n,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1217) struct tc_action *actions[], int event,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1218) struct netlink_ext_ack *extack)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1219) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1220) struct sk_buff *skb;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1221)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1222) skb = alloc_skb(NLMSG_GOODSIZE, GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1223) if (!skb)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1224) return -ENOBUFS;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1225) if (tca_get_fill(skb, actions, portid, n->nlmsg_seq, 0, event,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1226) 0, 1) <= 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1227) NL_SET_ERR_MSG(extack, "Failed to fill netlink attributes while adding TC action");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1228) kfree_skb(skb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1229) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1230) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1231)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1232) return rtnl_unicast(skb, net, portid);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1233) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1234)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1235) static struct tc_action *tcf_action_get_1(struct net *net, struct nlattr *nla,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1236) struct nlmsghdr *n, u32 portid,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1237) struct netlink_ext_ack *extack)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1238) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1239) struct nlattr *tb[TCA_ACT_MAX + 1];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1240) const struct tc_action_ops *ops;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1241) struct tc_action *a;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1242) int index;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1243) int err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1244)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1245) err = nla_parse_nested_deprecated(tb, TCA_ACT_MAX, nla,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1246) tcf_action_policy, extack);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1247) if (err < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1248) goto err_out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1249)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1250) err = -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1251) if (tb[TCA_ACT_INDEX] == NULL ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1252) nla_len(tb[TCA_ACT_INDEX]) < sizeof(index)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1253) NL_SET_ERR_MSG(extack, "Invalid TC action index value");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1254) goto err_out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1255) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1256) index = nla_get_u32(tb[TCA_ACT_INDEX]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1257)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1258) err = -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1259) ops = tc_lookup_action(tb[TCA_ACT_KIND]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1260) if (!ops) { /* could happen in a batch of actions */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1261) NL_SET_ERR_MSG(extack, "Specified TC action kind not found");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1262) goto err_out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1263) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1264) err = -ENOENT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1265) if (ops->lookup(net, &a, index) == 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1266) NL_SET_ERR_MSG(extack, "TC action with specified index not found");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1267) goto err_mod;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1268) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1269)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1270) module_put(ops->owner);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1271) return a;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1272)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1273) err_mod:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1274) module_put(ops->owner);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1275) err_out:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1276) return ERR_PTR(err);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1277) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1278)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1279) static int tca_action_flush(struct net *net, struct nlattr *nla,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1280) struct nlmsghdr *n, u32 portid,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1281) struct netlink_ext_ack *extack)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1282) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1283) struct sk_buff *skb;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1284) unsigned char *b;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1285) struct nlmsghdr *nlh;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1286) struct tcamsg *t;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1287) struct netlink_callback dcb;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1288) struct nlattr *nest;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1289) struct nlattr *tb[TCA_ACT_MAX + 1];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1290) const struct tc_action_ops *ops;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1291) struct nlattr *kind;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1292) int err = -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1293)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1294) skb = alloc_skb(NLMSG_GOODSIZE, GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1295) if (!skb)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1296) return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1297)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1298) b = skb_tail_pointer(skb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1299)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1300) err = nla_parse_nested_deprecated(tb, TCA_ACT_MAX, nla,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1301) tcf_action_policy, extack);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1302) if (err < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1303) goto err_out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1304)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1305) err = -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1306) kind = tb[TCA_ACT_KIND];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1307) ops = tc_lookup_action(kind);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1308) if (!ops) { /* someone is trying to flush an unknown action */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1309) NL_SET_ERR_MSG(extack, "Cannot flush unknown TC action");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1310) goto err_out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1311) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1312)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1313) nlh = nlmsg_put(skb, portid, n->nlmsg_seq, RTM_DELACTION,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1314) sizeof(*t), 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1315) if (!nlh) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1316) NL_SET_ERR_MSG(extack, "Failed to create TC action flush notification");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1317) goto out_module_put;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1318) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1319) t = nlmsg_data(nlh);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1320) t->tca_family = AF_UNSPEC;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1321) t->tca__pad1 = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1322) t->tca__pad2 = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1323)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1324) nest = nla_nest_start_noflag(skb, TCA_ACT_TAB);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1325) if (!nest) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1326) NL_SET_ERR_MSG(extack, "Failed to add new netlink message");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1327) goto out_module_put;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1328) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1329)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1330) err = ops->walk(net, skb, &dcb, RTM_DELACTION, ops, extack);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1331) if (err <= 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1332) nla_nest_cancel(skb, nest);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1333) goto out_module_put;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1334) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1335)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1336) nla_nest_end(skb, nest);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1337)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1338) nlh->nlmsg_len = skb_tail_pointer(skb) - b;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1339) nlh->nlmsg_flags |= NLM_F_ROOT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1340) module_put(ops->owner);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1341) err = rtnetlink_send(skb, net, portid, RTNLGRP_TC,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1342) n->nlmsg_flags & NLM_F_ECHO);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1343) if (err > 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1344) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1345) if (err < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1346) NL_SET_ERR_MSG(extack, "Failed to send TC action flush notification");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1347)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1348) return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1349)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1350) out_module_put:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1351) module_put(ops->owner);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1352) err_out:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1353) kfree_skb(skb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1354) return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1355) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1356)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1357) static int tcf_action_delete(struct net *net, struct tc_action *actions[])
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1358) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1359) int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1360)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1361) for (i = 0; i < TCA_ACT_MAX_PRIO && actions[i]; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1362) struct tc_action *a = actions[i];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1363) const struct tc_action_ops *ops = a->ops;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1364) /* Actions can be deleted concurrently, so we must save their
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1365) * idrinfo and index to look them up again after the reference is released.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1366) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1367) struct tcf_idrinfo *idrinfo = a->idrinfo;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1368) u32 act_index = a->tcfa_index;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1369)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1370) actions[i] = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1371) if (tcf_action_put(a)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1372) /* last reference, action was deleted concurrently */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1373) module_put(ops->owner);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1374) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1375) int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1376)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1377) /* now do the delete */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1378) ret = tcf_idr_delete_index(idrinfo, act_index);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1379) if (ret < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1380) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1381) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1382) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1383) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1384) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1385)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1386) static int
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1387) tcf_del_notify(struct net *net, struct nlmsghdr *n, struct tc_action *actions[],
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1388) u32 portid, size_t attr_size, struct netlink_ext_ack *extack)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1389) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1390) int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1391) struct sk_buff *skb;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1392)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1393) skb = alloc_skb(attr_size <= NLMSG_GOODSIZE ? NLMSG_GOODSIZE : attr_size,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1394) GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1395) if (!skb)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1396) return -ENOBUFS;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1397)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1398) if (tca_get_fill(skb, actions, portid, n->nlmsg_seq, 0, RTM_DELACTION,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1399) 0, 2) <= 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1400) NL_SET_ERR_MSG(extack, "Failed to fill netlink TC action attributes");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1401) kfree_skb(skb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1402) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1403) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1404)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1405) /* now do the delete */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1406) ret = tcf_action_delete(net, actions);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1407) if (ret < 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1408) NL_SET_ERR_MSG(extack, "Failed to delete TC action");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1409) kfree_skb(skb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1410) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1411) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1412)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1413) ret = rtnetlink_send(skb, net, portid, RTNLGRP_TC,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1414) n->nlmsg_flags & NLM_F_ECHO);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1415) if (ret > 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1416) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1417) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1418) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1419)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1420) static int
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1421) tca_action_gd(struct net *net, struct nlattr *nla, struct nlmsghdr *n,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1422) u32 portid, int event, struct netlink_ext_ack *extack)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1423) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1424) int i, ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1425) struct nlattr *tb[TCA_ACT_MAX_PRIO + 1];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1426) struct tc_action *act;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1427) size_t attr_size = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1428) struct tc_action *actions[TCA_ACT_MAX_PRIO] = {};
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1429)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1430) ret = nla_parse_nested_deprecated(tb, TCA_ACT_MAX_PRIO, nla, NULL,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1431) extack);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1432) if (ret < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1433) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1434)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1435) if (event == RTM_DELACTION && n->nlmsg_flags & NLM_F_ROOT) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1436) if (tb[1])
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1437) return tca_action_flush(net, tb[1], n, portid, extack);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1438)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1439) NL_SET_ERR_MSG(extack, "Invalid netlink attributes while flushing TC action");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1440) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1441) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1442)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1443) for (i = 1; i <= TCA_ACT_MAX_PRIO && tb[i]; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1444) act = tcf_action_get_1(net, tb[i], n, portid, extack);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1445) if (IS_ERR(act)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1446) ret = PTR_ERR(act);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1447) goto err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1448) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1449) attr_size += tcf_action_fill_size(act);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1450) actions[i - 1] = act;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1451) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1452)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1453) attr_size = tcf_action_full_attrs_size(attr_size);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1454)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1455) if (event == RTM_GETACTION)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1456) ret = tcf_get_notify(net, portid, n, actions, event, extack);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1457) else { /* delete */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1458) ret = tcf_del_notify(net, n, actions, portid, attr_size, extack);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1459) if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1460) goto err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1461) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1462) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1463) err:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1464) tcf_action_put_many(actions);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1465) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1466) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1467)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1468) static int
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1469) tcf_add_notify(struct net *net, struct nlmsghdr *n, struct tc_action *actions[],
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1470) u32 portid, size_t attr_size, struct netlink_ext_ack *extack)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1471) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1472) struct sk_buff *skb;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1473) int err = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1474)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1475) skb = alloc_skb(attr_size <= NLMSG_GOODSIZE ? NLMSG_GOODSIZE : attr_size,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1476) GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1477) if (!skb)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1478) return -ENOBUFS;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1479)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1480) if (tca_get_fill(skb, actions, portid, n->nlmsg_seq, n->nlmsg_flags,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1481) RTM_NEWACTION, 0, 0) <= 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1482) NL_SET_ERR_MSG(extack, "Failed to fill netlink attributes while adding TC action");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1483) kfree_skb(skb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1484) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1485) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1486)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1487) err = rtnetlink_send(skb, net, portid, RTNLGRP_TC,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1488) n->nlmsg_flags & NLM_F_ECHO);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1489) if (err > 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1490) err = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1491) return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1492) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1493)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1494) static int tcf_action_add(struct net *net, struct nlattr *nla,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1495) struct nlmsghdr *n, u32 portid, int ovr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1496) struct netlink_ext_ack *extack)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1497) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1498) size_t attr_size = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1499) int loop, ret, i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1500) struct tc_action *actions[TCA_ACT_MAX_PRIO] = {};
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1501) int init_res[TCA_ACT_MAX_PRIO] = {};
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1502)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1503) for (loop = 0; loop < 10; loop++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1504) ret = tcf_action_init(net, NULL, nla, NULL, NULL, ovr, 0,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1505) actions, init_res, &attr_size, true, extack);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1506) if (ret != -EAGAIN)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1507) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1508) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1509)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1510) if (ret < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1511) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1512) ret = tcf_add_notify(net, n, actions, portid, attr_size, extack);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1513)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1514) /* only put existing actions */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1515) for (i = 0; i < TCA_ACT_MAX_PRIO; i++)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1516) if (init_res[i] == ACT_P_CREATED)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1517) actions[i] = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1518) tcf_action_put_many(actions);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1519)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1520) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1521) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1522)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1523) static const struct nla_policy tcaa_policy[TCA_ROOT_MAX + 1] = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1524) [TCA_ROOT_FLAGS] = NLA_POLICY_BITFIELD32(TCA_FLAG_LARGE_DUMP_ON),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1525) [TCA_ROOT_TIME_DELTA] = { .type = NLA_U32 },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1526) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1527)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1528) static int tc_ctl_action(struct sk_buff *skb, struct nlmsghdr *n,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1529) struct netlink_ext_ack *extack)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1530) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1531) struct net *net = sock_net(skb->sk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1532) struct nlattr *tca[TCA_ROOT_MAX + 1];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1533) u32 portid = NETLINK_CB(skb).portid;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1534) int ret = 0, ovr = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1535)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1536) if ((n->nlmsg_type != RTM_GETACTION) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1537) !netlink_capable(skb, CAP_NET_ADMIN))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1538) return -EPERM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1539)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1540) ret = nlmsg_parse_deprecated(n, sizeof(struct tcamsg), tca,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1541) TCA_ROOT_MAX, NULL, extack);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1542) if (ret < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1543) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1544)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1545) if (tca[TCA_ACT_TAB] == NULL) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1546) NL_SET_ERR_MSG(extack, "Netlink action attributes missing");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1547) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1548) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1549)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1550) /* n->nlmsg_flags & NLM_F_CREATE */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1551) switch (n->nlmsg_type) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1552) case RTM_NEWACTION:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1553) /* We are going to assume that all other flags
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1554) * imply create-only-if-it-doesn't-exist.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1555) * Note that CREATE | EXCL implies that,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1556) * but since we want to avoid ambiguity (e.g. when flags
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1557) * is zero) we just set this explicitly.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1558) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1559) if (n->nlmsg_flags & NLM_F_REPLACE)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1560) ovr = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1561) ret = tcf_action_add(net, tca[TCA_ACT_TAB], n, portid, ovr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1562) extack);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1563) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1564) case RTM_DELACTION:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1565) ret = tca_action_gd(net, tca[TCA_ACT_TAB], n,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1566) portid, RTM_DELACTION, extack);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1567) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1568) case RTM_GETACTION:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1569) ret = tca_action_gd(net, tca[TCA_ACT_TAB], n,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1570) portid, RTM_GETACTION, extack);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1571) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1572) default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1573) BUG();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1574) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1575)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1576) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1577) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1578)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1579) static struct nlattr *find_dump_kind(struct nlattr **nla)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1580) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1581) struct nlattr *tb1, *tb2[TCA_ACT_MAX + 1];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1582) struct nlattr *tb[TCA_ACT_MAX_PRIO + 1];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1583) struct nlattr *kind;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1584)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1585) tb1 = nla[TCA_ACT_TAB];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1586) if (tb1 == NULL)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1587) return NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1588)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1589) if (nla_parse_deprecated(tb, TCA_ACT_MAX_PRIO, nla_data(tb1), NLMSG_ALIGN(nla_len(tb1)), NULL, NULL) < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1590) return NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1591)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1592) if (tb[1] == NULL)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1593) return NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1594) if (nla_parse_nested_deprecated(tb2, TCA_ACT_MAX, tb[1], tcf_action_policy, NULL) < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1595) return NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1596) kind = tb2[TCA_ACT_KIND];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1597)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1598) return kind;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1599) }
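
/*
 * Illustrative sketch, not part of the original file: builds, in miniature,
 * the attribute nesting that find_dump_kind() above walks back down, i.e.
 * TCA_ACT_TAB -> nest "1" (first priority slot) -> TCA_ACT_KIND string.
 * The function name and its arguments are hypothetical; a dump request from
 * userspace normally carries this layout.
 */
static int __maybe_unused tc_dump_kind_layout_example(struct sk_buff *skb,
						       const char *kind)
{
	struct nlattr *tab, *prio;

	tab = nla_nest_start_noflag(skb, TCA_ACT_TAB);
	if (!tab)
		return -EMSGSIZE;
	prio = nla_nest_start_noflag(skb, 1);
	if (!prio || nla_put_string(skb, TCA_ACT_KIND, kind)) {
		nla_nest_cancel(skb, tab);
		return -EMSGSIZE;
	}
	nla_nest_end(skb, prio);
	nla_nest_end(skb, tab);
	return 0;
}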
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1600)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1601) static int tc_dump_action(struct sk_buff *skb, struct netlink_callback *cb)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1602) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1603) struct net *net = sock_net(skb->sk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1604) struct nlmsghdr *nlh;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1605) unsigned char *b = skb_tail_pointer(skb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1606) struct nlattr *nest;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1607) struct tc_action_ops *a_o;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1608) int ret = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1609) struct tcamsg *t = (struct tcamsg *) nlmsg_data(cb->nlh);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1610) struct nlattr *tb[TCA_ROOT_MAX + 1];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1611) struct nlattr *count_attr = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1612) unsigned long jiffy_since = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1613) struct nlattr *kind = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1614) struct nla_bitfield32 bf;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1615) u32 msecs_since = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1616) u32 act_count = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1617)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1618) ret = nlmsg_parse_deprecated(cb->nlh, sizeof(struct tcamsg), tb,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1619) TCA_ROOT_MAX, tcaa_policy, cb->extack);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1620) if (ret < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1621) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1622)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1623) kind = find_dump_kind(tb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1624) if (kind == NULL) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1625) pr_info("tc_dump_action: action bad kind\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1626) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1627) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1628)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1629) a_o = tc_lookup_action(kind);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1630) if (a_o == NULL)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1631) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1632)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1633) cb->args[2] = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1634) if (tb[TCA_ROOT_FLAGS]) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1635) bf = nla_get_bitfield32(tb[TCA_ROOT_FLAGS]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1636) cb->args[2] = bf.value;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1637) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1638)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1639) if (tb[TCA_ROOT_TIME_DELTA]) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1640) msecs_since = nla_get_u32(tb[TCA_ROOT_TIME_DELTA]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1641) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1642)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1643) nlh = nlmsg_put(skb, NETLINK_CB(cb->skb).portid, cb->nlh->nlmsg_seq,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1644) cb->nlh->nlmsg_type, sizeof(*t), 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1645) if (!nlh)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1646) goto out_module_put;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1647)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1648) if (msecs_since)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1649) jiffy_since = jiffies - msecs_to_jiffies(msecs_since);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1650)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1651) t = nlmsg_data(nlh);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1652) t->tca_family = AF_UNSPEC;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1653) t->tca__pad1 = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1654) t->tca__pad2 = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1655) cb->args[3] = jiffy_since;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1656) count_attr = nla_reserve(skb, TCA_ROOT_COUNT, sizeof(u32));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1657) if (!count_attr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1658) goto out_module_put;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1659)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1660) nest = nla_nest_start_noflag(skb, TCA_ACT_TAB);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1661) if (nest == NULL)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1662) goto out_module_put;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1663)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1664) ret = a_o->walk(net, skb, cb, RTM_GETACTION, a_o, NULL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1665) if (ret < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1666) goto out_module_put;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1667)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1668) if (ret > 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1669) nla_nest_end(skb, nest);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1670) ret = skb->len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1671) act_count = cb->args[1];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1672) memcpy(nla_data(count_attr), &act_count, sizeof(u32));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1673) cb->args[1] = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1674) } else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1675) nlmsg_trim(skb, b);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1676)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1677) nlh->nlmsg_len = skb_tail_pointer(skb) - b;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1678) if (NETLINK_CB(cb->skb).portid && ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1679) nlh->nlmsg_flags |= NLM_F_MULTI;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1680) module_put(a_o->owner);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1681) return skb->len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1682)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1683) out_module_put:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1684) module_put(a_o->owner);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1685) nlmsg_trim(skb, b);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1686) return skb->len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1687) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1688)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1689) static int __init tc_action_init(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1690) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1691) rtnl_register(PF_UNSPEC, RTM_NEWACTION, tc_ctl_action, NULL, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1692) rtnl_register(PF_UNSPEC, RTM_DELACTION, tc_ctl_action, NULL, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1693) rtnl_register(PF_UNSPEC, RTM_GETACTION, tc_ctl_action, tc_dump_action,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1694) 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1695)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1696) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1697) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1698)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1699) subsys_initcall(tc_action_init);