^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1) /* SPDX-License-Identifier: GPL-2.0 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2) #ifndef __NET_PKT_CLS_H
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3) #define __NET_PKT_CLS_H
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5) #include <linux/pkt_cls.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6) #include <linux/workqueue.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7) #include <net/sch_generic.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8) #include <net/act_api.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9) #include <net/net_namespace.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11) /* TC action not accessible from user space */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12) #define TC_ACT_CONSUMED (TC_ACT_VALUE_MAX + 1)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14) /* Basic packet classifier frontend definitions. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15)
/* State for walking all filters attached to a classifier instance.
 * @fn is invoked once per filter node; the remaining fields carry
 * iteration state (skip/count bookkeeping, abort flag, resume cookie).
 * NOTE(review): exact skip/count/cookie semantics are defined by the
 * walk implementations — confirm against cls_api.c users.
 */
struct tcf_walker {
	int	stop;		/* set non-zero to abort the walk */
	int	skip;
	int	count;
	bool	nonempty;	/* caller only asks "is any filter present?" */
	unsigned long cookie;
	int	(*fn)(struct tcf_proto *, void *node, struct tcf_walker *);
};
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 24)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 25) int register_tcf_proto_ops(struct tcf_proto_ops *ops);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 26) int unregister_tcf_proto_ops(struct tcf_proto_ops *ops);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 27)
/* Extended binding parameters passed to tcf_block_get_ext() /
 * tcf_block_put_ext() when a qdisc attaches to a filter block.
 */
struct tcf_block_ext_info {
	enum flow_block_binder_type binder_type;
	tcf_chain_head_change_t *chain_head_change;	/* notified when chain 0's head filter changes */
	void *chain_head_change_priv;			/* opaque cookie for the callback above */
	u32 block_index;				/* index of a shared block to bind to (0 = private) */
};
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 34)
/* A filter block plus its binding info and filter chain, used by qdiscs
 * to run classifiers at qdisc-defined event points ("qevents").
 */
struct tcf_qevent {
	struct tcf_block	*block;
	struct tcf_block_ext_info info;
	struct tcf_proto __rcu *filter_chain;
};
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 40)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 41) struct tcf_block_cb;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 42) bool tcf_queue_work(struct rcu_work *rwork, work_func_t func);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 43)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 44) #ifdef CONFIG_NET_CLS
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 45) struct tcf_chain *tcf_chain_get_by_act(struct tcf_block *block,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 46) u32 chain_index);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 47) void tcf_chain_put_by_act(struct tcf_chain *chain);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 48) struct tcf_chain *tcf_get_next_chain(struct tcf_block *block,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 49) struct tcf_chain *chain);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 50) struct tcf_proto *tcf_get_next_proto(struct tcf_chain *chain,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 51) struct tcf_proto *tp, bool rtnl_held);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 52) void tcf_block_netif_keep_dst(struct tcf_block *block);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 53) int tcf_block_get(struct tcf_block **p_block,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 54) struct tcf_proto __rcu **p_filter_chain, struct Qdisc *q,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 55) struct netlink_ext_ack *extack);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 56) int tcf_block_get_ext(struct tcf_block **p_block, struct Qdisc *q,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 57) struct tcf_block_ext_info *ei,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 58) struct netlink_ext_ack *extack);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 59) void tcf_block_put(struct tcf_block *block);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 60) void tcf_block_put_ext(struct tcf_block *block, struct Qdisc *q,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 61) struct tcf_block_ext_info *ei);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 62)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 63) static inline bool tcf_block_shared(struct tcf_block *block)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 64) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 65) return block->index;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 66) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 67)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 68) static inline bool tcf_block_non_null_shared(struct tcf_block *block)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 69) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 70) return block && block->index;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 71) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 72)
/* Return the qdisc owning @block. Only meaningful for non-shared blocks:
 * a shared block may serve several qdiscs, hence the WARN_ON.
 */
static inline struct Qdisc *tcf_block_q(struct tcf_block *block)
{
	WARN_ON(tcf_block_shared(block));
	return block->q;
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 78)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 79) int tcf_classify(struct sk_buff *skb, const struct tcf_proto *tp,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 80) struct tcf_result *res, bool compat_mode);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 81) int tcf_classify_ingress(struct sk_buff *skb,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 82) const struct tcf_block *ingress_block,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 83) const struct tcf_proto *tp, struct tcf_result *res,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 84) bool compat_mode);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 85)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 86) #else
/* CONFIG_NET_CLS disabled: classifier blocks are compiled out. The stubs
 * below make block setup/teardown succeed trivially and classification
 * always report "no match" (TC_ACT_UNSPEC).
 */
static inline bool tcf_block_shared(struct tcf_block *block)
{
	return false;
}

static inline bool tcf_block_non_null_shared(struct tcf_block *block)
{
	return false;
}

static inline
int tcf_block_get(struct tcf_block **p_block,
		  struct tcf_proto __rcu **p_filter_chain, struct Qdisc *q,
		  struct netlink_ext_ack *extack)
{
	return 0;
}

static inline
int tcf_block_get_ext(struct tcf_block **p_block, struct Qdisc *q,
		      struct tcf_block_ext_info *ei,
		      struct netlink_ext_ack *extack)
{
	return 0;
}

static inline void tcf_block_put(struct tcf_block *block)
{
}

static inline
void tcf_block_put_ext(struct tcf_block *block, struct Qdisc *q,
		       struct tcf_block_ext_info *ei)
{
}

/* No blocks exist, hence no owning qdisc. */
static inline struct Qdisc *tcf_block_q(struct tcf_block *block)
{
	return NULL;
}

static inline
int tc_setup_cb_block_register(struct tcf_block *block, flow_setup_cb_t *cb,
			       void *cb_priv)
{
	return 0;
}

static inline
void tc_setup_cb_block_unregister(struct tcf_block *block, flow_setup_cb_t *cb,
				  void *cb_priv)
{
}

static inline int tcf_classify(struct sk_buff *skb, const struct tcf_proto *tp,
			       struct tcf_result *res, bool compat_mode)
{
	return TC_ACT_UNSPEC;
}

static inline int tcf_classify_ingress(struct sk_buff *skb,
				       const struct tcf_block *ingress_block,
				       const struct tcf_proto *tp,
				       struct tcf_result *res, bool compat_mode)
{
	return TC_ACT_UNSPEC;
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 154)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 155) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 156)
/* Atomically replace the class handle stored at @clp with @cl and return
 * the handle that was previously bound (0 if none).
 */
static inline unsigned long
__cls_set_class(unsigned long *clp, unsigned long cl)
{
	return xchg(clp, cl);
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 162)
/* Bind the filter result @r to the class identified by r->classid: ask the
 * qdisc's class ops for the internal class handle, publish it in r->class,
 * and unbind whatever class was bound before, if any.
 * NOTE(review): callers such as tcf_bind_filter() wrap this in
 * sch_tree_lock() — presumably required here too; confirm.
 */
static inline void
__tcf_bind_filter(struct Qdisc *q, struct tcf_result *r, unsigned long base)
{
	unsigned long cl;

	cl = q->ops->cl_ops->bind_tcf(q, base, r->classid);
	cl = __cls_set_class(&r->class, cl);
	if (cl)
		q->ops->cl_ops->unbind_tcf(q, cl);
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 173)
/* Bind @r to the class named by r->classid under the qdisc tree lock,
 * releasing any previously bound class.
 */
static inline void
tcf_bind_filter(struct tcf_proto *tp, struct tcf_result *r, unsigned long base)
{
	struct Qdisc *q = tp->chain->block->q;

	/* Check q as it is not set for shared blocks. In that case,
	 * setting class is not supported.
	 */
	if (!q)
		return;
	sch_tree_lock(q);
	__tcf_bind_filter(q, r, base);
	sch_tree_unlock(q);
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 188)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 189) static inline void
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 190) __tcf_unbind_filter(struct Qdisc *q, struct tcf_result *r)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 191) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 192) unsigned long cl;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 193)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 194) if ((cl = __cls_set_class(&r->class, 0)) != 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 195) q->ops->cl_ops->unbind_tcf(q, cl);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 196) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 197)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 198) static inline void
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 199) tcf_unbind_filter(struct tcf_proto *tp, struct tcf_result *r)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 200) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 201) struct Qdisc *q = tp->chain->block->q;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 202)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 203) if (!q)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 204) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 205) __tcf_unbind_filter(q, r);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 206) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 207)
/* Per-filter extension state: the attached actions plus the TLV types
 * used when dumping them back to user space.
 */
struct tcf_exts {
#ifdef CONFIG_NET_CLS_ACT
	__u32	type; /* for backward compat(TCA_OLD_COMPAT) */
	int nr_actions;			/* used slots in actions[] */
	struct tc_action **actions;	/* array of TCA_ACT_MAX_PRIO pointers, see tcf_exts_init() */
	struct net *net;		/* netns recorded at init; see tcf_exts_get_net() */
#endif
	/* Map to export classifier specific extension TLV types to the
	 * generic extensions API. Unsupported extensions must be set to 0.
	 */
	int action;
	int police;
};
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 221)
/* Initialize @exts: allocate the action pointer array (when actions are
 * compiled in), record @net, and store the TLV types @action/@police used
 * for dumping. Returns 0 on success or -ENOMEM if allocation fails.
 */
static inline int tcf_exts_init(struct tcf_exts *exts, struct net *net,
				int action, int police)
{
#ifdef CONFIG_NET_CLS_ACT
	exts->type = 0;
	exts->nr_actions = 0;
	exts->net = net;
	exts->actions = kcalloc(TCA_ACT_MAX_PRIO, sizeof(struct tc_action *),
				GFP_KERNEL);
	if (!exts->actions)
		return -ENOMEM;
#endif
	exts->action = action;
	exts->police = police;
	return 0;
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 238)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 239) /* Return false if the netns is being destroyed in cleanup_net(). Callers
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 240) * need to do cleanup synchronously in this case, otherwise may race with
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 241) * tc_action_net_exit(). Return true for other cases.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 242) */
static inline bool tcf_exts_get_net(struct tcf_exts *exts)
{
#ifdef CONFIG_NET_CLS_ACT
	/* maybe_get_net() returns NULL if the netns refcount already hit
	 * zero, i.e. cleanup_net() is in flight.
	 */
	exts->net = maybe_get_net(exts->net);
	return exts->net != NULL;
#else
	/* No actions, no netns reference to take. */
	return true;
#endif
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 252)
/* Drop the netns reference taken by a successful tcf_exts_get_net(). */
static inline void tcf_exts_put_net(struct tcf_exts *exts)
{
#ifdef CONFIG_NET_CLS_ACT
	if (exts->net)
		put_net(exts->net);
#endif
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 260)
#ifdef CONFIG_NET_CLS_ACT
/* Iterate @a over every action attached to @exts; stops at the first NULL
 * slot or after TCA_ACT_MAX_PRIO entries.
 */
#define tcf_exts_for_each_action(i, a, exts) \
	for (i = 0; i < TCA_ACT_MAX_PRIO && ((a) = (exts)->actions[i]); i++)
#else
/* Actions compiled out: body never runs; arguments are referenced only to
 * avoid "unused variable" warnings.
 */
#define tcf_exts_for_each_action(i, a, exts) \
	for (; 0; (void)(i), (void)(a), (void)(exts))
#endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 268)
/* Push driver/hardware-reported counters (@bytes, @packets, @drops,
 * @lastuse) and the hw-stats type into every action attached to @exts.
 * No-op when actions are compiled out.
 */
static inline void
tcf_exts_stats_update(const struct tcf_exts *exts,
		      u64 bytes, u64 packets, u64 drops, u64 lastuse,
		      u8 used_hw_stats, bool used_hw_stats_valid)
{
#ifdef CONFIG_NET_CLS_ACT
	int i;

	/* NOTE(review): presumably keeps the whole batch of per-action
	 * updates on one CPU for consistency with per-CPU stats — confirm.
	 */
	preempt_disable();

	for (i = 0; i < exts->nr_actions; i++) {
		struct tc_action *a = exts->actions[i];

		tcf_action_stats_update(a, bytes, packets, drops,
					lastuse, true);
		a->used_hw_stats = used_hw_stats;
		a->used_hw_stats_valid = used_hw_stats_valid;
	}

	preempt_enable();
#endif
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 291)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 292) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 293) * tcf_exts_has_actions - check if at least one action is present
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 294) * @exts: tc filter extensions handle
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 295) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 296) * Returns true if at least one action is present.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 297) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 298) static inline bool tcf_exts_has_actions(struct tcf_exts *exts)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 299) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 300) #ifdef CONFIG_NET_CLS_ACT
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 301) return exts->nr_actions;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 302) #else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 303) return false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 304) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 305) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 306)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 307) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 308) * tcf_exts_exec - execute tc filter extensions
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 309) * @skb: socket buffer
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 310) * @exts: tc filter extensions handle
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 311) * @res: desired result
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 312) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 313) * Executes all configured extensions. Returns TC_ACT_OK on a normal execution,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 314) * a negative number if the filter must be considered unmatched or
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 315) * a positive action code (TC_ACT_*) which must be returned to the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 316) * underlying layer.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 317) */
static inline int
tcf_exts_exec(struct sk_buff *skb, struct tcf_exts *exts,
	      struct tcf_result *res)
{
#ifdef CONFIG_NET_CLS_ACT
	return tcf_action_exec(skb, exts->actions, exts->nr_actions, res);
#endif
	/* Actions compiled out: a matching filter is simply accepted. */
	return TC_ACT_OK;
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 327)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 328) int tcf_exts_validate(struct net *net, struct tcf_proto *tp,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 329) struct nlattr **tb, struct nlattr *rate_tlv,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 330) struct tcf_exts *exts, bool ovr, bool rtnl_held,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 331) struct netlink_ext_ack *extack);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 332) void tcf_exts_destroy(struct tcf_exts *exts);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 333) void tcf_exts_change(struct tcf_exts *dst, struct tcf_exts *src);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 334) int tcf_exts_dump(struct sk_buff *skb, struct tcf_exts *exts);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 335) int tcf_exts_terse_dump(struct sk_buff *skb, struct tcf_exts *exts);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 336) int tcf_exts_dump_stats(struct sk_buff *skb, struct tcf_exts *exts);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 337)
/**
 * struct tcf_pkt_info - packet information
 * @ptr: pointer into the packet data (NOTE(review): exact semantics are
 *	defined by the ematch users — confirm against em_* modules)
 * @nexthdr: next-header position/identifier used during matching
 *	(TODO confirm)
 */
struct tcf_pkt_info {
	unsigned char *		ptr;
	int			nexthdr;
};
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 345)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 346) #ifdef CONFIG_NET_EMATCH
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 347)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 348) struct tcf_ematch_ops;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 349)
/**
 * struct tcf_ematch - extended match (ematch)
 *
 * @matchid: identifier to allow userspace to reidentify a match
 * @flags: flags specifying attributes and the relation to other matches
 * @ops: the operations lookup table of the corresponding ematch module
 * @datalen: length of the ematch specific configuration data
 * @data: ematch specific data
 * @net: network namespace (NOTE(review): presumably the netns the match
 *	was configured in — confirm)
 */
struct tcf_ematch {
	struct tcf_ematch_ops * ops;
	unsigned long		data;
	unsigned int		datalen;
	u16			matchid;
	u16			flags;
	struct net		*net;
};
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 367)
/* An ematch without ops is a container that only groups other matches. */
static inline int tcf_em_is_container(struct tcf_ematch *em)
{
	return !em->ops;
}

/* Non-zero if TCF_EM_SIMPLE is set (NOTE(review): presumably means the
 * match payload is stored inline in em->data — confirm).
 */
static inline int tcf_em_is_simple(struct tcf_ematch *em)
{
	return em->flags & TCF_EM_SIMPLE;
}

/* Non-zero if the match result must be logically inverted. */
static inline int tcf_em_is_inverted(struct tcf_ematch *em)
{
	return em->flags & TCF_EM_INVERT;
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 382)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 383) static inline int tcf_em_last_match(struct tcf_ematch *em)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 384) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 385) return (em->flags & TCF_EM_REL_MASK) == TCF_EM_REL_END;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 386) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 387)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 388) static inline int tcf_em_early_end(struct tcf_ematch *em, int result)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 389) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 390) if (tcf_em_last_match(em))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 391) return 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 392)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 393) if (result == 0 && em->flags & TCF_EM_REL_AND)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 394) return 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 395)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 396) if (result != 0 && em->flags & TCF_EM_REL_OR)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 397) return 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 398)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 399) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 400) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 401)
/**
 * struct tcf_ematch_tree - ematch tree handle
 *
 * @hdr: ematch tree header supplied by userspace
 * @matches: array of ematches (presumably hdr.nmatches entries — confirm)
 */
struct tcf_ematch_tree {
	struct tcf_ematch_tree_hdr hdr;
	struct tcf_ematch *	matches;

};
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 413)
/**
 * struct tcf_ematch_ops - ematch module operations
 *
 * @kind: identifier (kind) of this ematch module
 * @datalen: length of expected configuration data (optional)
 * @change: called during validation (optional)
 * @match: called during ematch tree evaluation, must return 1/0
 * @destroy: called during destruction (optional)
 * @dump: called during dumping process (optional)
 * @owner: owner, must be set to THIS_MODULE
 * @link: link to previous/next ematch module (internal use)
 */
struct tcf_ematch_ops {
	int			kind;
	int			datalen;
	int			(*change)(struct net *net, void *,
					  int, struct tcf_ematch *);
	int			(*match)(struct sk_buff *, struct tcf_ematch *,
					 struct tcf_pkt_info *);
	void			(*destroy)(struct tcf_ematch *);
	int			(*dump)(struct sk_buff *, struct tcf_ematch *);
	struct module		*owner;
	struct list_head	link;
};
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 438)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 439) int tcf_em_register(struct tcf_ematch_ops *);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 440) void tcf_em_unregister(struct tcf_ematch_ops *);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 441) int tcf_em_tree_validate(struct tcf_proto *, struct nlattr *,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 442) struct tcf_ematch_tree *);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 443) void tcf_em_tree_destroy(struct tcf_ematch_tree *);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 444) int tcf_em_tree_dump(struct sk_buff *, struct tcf_ematch_tree *, int);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 445) int __tcf_em_tree_match(struct sk_buff *, struct tcf_ematch_tree *,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 446) struct tcf_pkt_info *);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 447)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 448) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 449) * tcf_em_tree_match - evaulate an ematch tree
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 450) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 451) * @skb: socket buffer of the packet in question
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 452) * @tree: ematch tree to be used for evaluation
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 453) * @info: packet information examined by classifier
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 454) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 455) * This function matches @skb against the ematch tree in @tree by going
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 456) * through all ematches respecting their logic relations returning
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 457) * as soon as the result is obvious.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 458) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 459) * Returns 1 if the ematch tree as-one matches, no ematches are configured
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 460) * or ematch is not enabled in the kernel, otherwise 0 is returned.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 461) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 462) static inline int tcf_em_tree_match(struct sk_buff *skb,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 463) struct tcf_ematch_tree *tree,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 464) struct tcf_pkt_info *info)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 465) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 466) if (tree->hdr.nmatches)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 467) return __tcf_em_tree_match(skb, tree, info);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 468) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 469) return 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 470) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 471)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 472) #define MODULE_ALIAS_TCF_EMATCH(kind) MODULE_ALIAS("ematch-kind-" __stringify(kind))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 473)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 474) #else /* CONFIG_NET_EMATCH */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 475)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 476) struct tcf_ematch_tree {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 477) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 478)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 479) #define tcf_em_tree_validate(tp, tb, t) ((void)(t), 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 480) #define tcf_em_tree_destroy(t) do { (void)(t); } while(0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 481) #define tcf_em_tree_dump(skb, t, tlv) (0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 482) #define tcf_em_tree_match(skb, t, info) ((void)(info), 1)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 483)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 484) #endif /* CONFIG_NET_EMATCH */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 485)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 486) static inline unsigned char * tcf_get_base_ptr(struct sk_buff *skb, int layer)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 487) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 488) switch (layer) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 489) case TCF_LAYER_LINK:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 490) return skb_mac_header(skb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 491) case TCF_LAYER_NETWORK:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 492) return skb_network_header(skb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 493) case TCF_LAYER_TRANSPORT:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 494) return skb_transport_header(skb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 495) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 496)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 497) return NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 498) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 499)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 500) static inline int tcf_valid_offset(const struct sk_buff *skb,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 501) const unsigned char *ptr, const int len)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 502) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 503) return likely((ptr + len) <= skb_tail_pointer(skb) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 504) ptr >= skb->head &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 505) (ptr <= (ptr + len)));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 506) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 507)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 508) static inline int
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 509) tcf_change_indev(struct net *net, struct nlattr *indev_tlv,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 510) struct netlink_ext_ack *extack)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 511) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 512) char indev[IFNAMSIZ];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 513) struct net_device *dev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 514)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 515) if (nla_strlcpy(indev, indev_tlv, IFNAMSIZ) >= IFNAMSIZ) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 516) NL_SET_ERR_MSG_ATTR(extack, indev_tlv,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 517) "Interface name too long");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 518) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 519) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 520) dev = __dev_get_by_name(net, indev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 521) if (!dev) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 522) NL_SET_ERR_MSG_ATTR(extack, indev_tlv,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 523) "Network device not found");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 524) return -ENODEV;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 525) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 526) return dev->ifindex;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 527) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 528)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 529) static inline bool
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 530) tcf_match_indev(struct sk_buff *skb, int ifindex)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 531) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 532) if (!ifindex)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 533) return true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 534) if (!skb->skb_iif)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 535) return false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 536) return ifindex == skb->skb_iif;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 537) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 538)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 539) int tc_setup_flow_action(struct flow_action *flow_action,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 540) const struct tcf_exts *exts);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 541) void tc_cleanup_flow_action(struct flow_action *flow_action);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 542)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 543) int tc_setup_cb_call(struct tcf_block *block, enum tc_setup_type type,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 544) void *type_data, bool err_stop, bool rtnl_held);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 545) int tc_setup_cb_add(struct tcf_block *block, struct tcf_proto *tp,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 546) enum tc_setup_type type, void *type_data, bool err_stop,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 547) u32 *flags, unsigned int *in_hw_count, bool rtnl_held);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 548) int tc_setup_cb_replace(struct tcf_block *block, struct tcf_proto *tp,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 549) enum tc_setup_type type, void *type_data, bool err_stop,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 550) u32 *old_flags, unsigned int *old_in_hw_count,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 551) u32 *new_flags, unsigned int *new_in_hw_count,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 552) bool rtnl_held);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 553) int tc_setup_cb_destroy(struct tcf_block *block, struct tcf_proto *tp,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 554) enum tc_setup_type type, void *type_data, bool err_stop,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 555) u32 *flags, unsigned int *in_hw_count, bool rtnl_held);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 556) int tc_setup_cb_reoffload(struct tcf_block *block, struct tcf_proto *tp,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 557) bool add, flow_setup_cb_t *cb,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 558) enum tc_setup_type type, void *type_data,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 559) void *cb_priv, u32 *flags, unsigned int *in_hw_count);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 560) unsigned int tcf_exts_num_actions(struct tcf_exts *exts);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 561)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 562) #ifdef CONFIG_NET_CLS_ACT
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 563) int tcf_qevent_init(struct tcf_qevent *qe, struct Qdisc *sch,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 564) enum flow_block_binder_type binder_type,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 565) struct nlattr *block_index_attr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 566) struct netlink_ext_ack *extack);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 567) void tcf_qevent_destroy(struct tcf_qevent *qe, struct Qdisc *sch);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 568) int tcf_qevent_validate_change(struct tcf_qevent *qe, struct nlattr *block_index_attr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 569) struct netlink_ext_ack *extack);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 570) struct sk_buff *tcf_qevent_handle(struct tcf_qevent *qe, struct Qdisc *sch, struct sk_buff *skb,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 571) struct sk_buff **to_free, int *ret);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 572) int tcf_qevent_dump(struct sk_buff *skb, int attr_name, struct tcf_qevent *qe);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 573) #else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 574) static inline int tcf_qevent_init(struct tcf_qevent *qe, struct Qdisc *sch,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 575) enum flow_block_binder_type binder_type,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 576) struct nlattr *block_index_attr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 577) struct netlink_ext_ack *extack)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 578) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 579) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 580) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 581)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 582) static inline void tcf_qevent_destroy(struct tcf_qevent *qe, struct Qdisc *sch)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 583) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 584) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 585)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 586) static inline int tcf_qevent_validate_change(struct tcf_qevent *qe, struct nlattr *block_index_attr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 587) struct netlink_ext_ack *extack)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 588) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 589) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 590) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 591)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 592) static inline struct sk_buff *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 593) tcf_qevent_handle(struct tcf_qevent *qe, struct Qdisc *sch, struct sk_buff *skb,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 594) struct sk_buff **to_free, int *ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 595) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 596) return skb;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 597) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 598)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 599) static inline int tcf_qevent_dump(struct sk_buff *skb, int attr_name, struct tcf_qevent *qe)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 600) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 601) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 602) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 603) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 604)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 605) struct tc_cls_u32_knode {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 606) struct tcf_exts *exts;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 607) struct tcf_result *res;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 608) struct tc_u32_sel *sel;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 609) u32 handle;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 610) u32 val;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 611) u32 mask;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 612) u32 link_handle;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 613) u8 fshift;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 614) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 615)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 616) struct tc_cls_u32_hnode {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 617) u32 handle;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 618) u32 prio;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 619) unsigned int divisor;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 620) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 621)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 622) enum tc_clsu32_command {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 623) TC_CLSU32_NEW_KNODE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 624) TC_CLSU32_REPLACE_KNODE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 625) TC_CLSU32_DELETE_KNODE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 626) TC_CLSU32_NEW_HNODE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 627) TC_CLSU32_REPLACE_HNODE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 628) TC_CLSU32_DELETE_HNODE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 629) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 630)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 631) struct tc_cls_u32_offload {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 632) struct flow_cls_common_offload common;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 633) /* knode values */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 634) enum tc_clsu32_command command;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 635) union {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 636) struct tc_cls_u32_knode knode;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 637) struct tc_cls_u32_hnode hnode;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 638) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 639) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 640)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 641) static inline bool tc_can_offload(const struct net_device *dev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 642) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 643) return dev->features & NETIF_F_HW_TC;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 644) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 645)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 646) static inline bool tc_can_offload_extack(const struct net_device *dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 647) struct netlink_ext_ack *extack)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 648) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 649) bool can = tc_can_offload(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 650)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 651) if (!can)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 652) NL_SET_ERR_MSG(extack, "TC offload is disabled on net device");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 653)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 654) return can;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 655) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 656)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 657) static inline bool
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 658) tc_cls_can_offload_and_chain0(const struct net_device *dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 659) struct flow_cls_common_offload *common)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 660) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 661) if (!tc_can_offload_extack(dev, common->extack))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 662) return false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 663) if (common->chain_index) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 664) NL_SET_ERR_MSG(common->extack,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 665) "Driver supports only offload of chain 0");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 666) return false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 667) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 668) return true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 669) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 670)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 671) static inline bool tc_skip_hw(u32 flags)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 672) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 673) return (flags & TCA_CLS_FLAGS_SKIP_HW) ? true : false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 674) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 675)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 676) static inline bool tc_skip_sw(u32 flags)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 677) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 678) return (flags & TCA_CLS_FLAGS_SKIP_SW) ? true : false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 679) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 680)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 681) /* SKIP_HW and SKIP_SW are mutually exclusive flags. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 682) static inline bool tc_flags_valid(u32 flags)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 683) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 684) if (flags & ~(TCA_CLS_FLAGS_SKIP_HW | TCA_CLS_FLAGS_SKIP_SW |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 685) TCA_CLS_FLAGS_VERBOSE))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 686) return false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 687)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 688) flags &= TCA_CLS_FLAGS_SKIP_HW | TCA_CLS_FLAGS_SKIP_SW;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 689) if (!(flags ^ (TCA_CLS_FLAGS_SKIP_HW | TCA_CLS_FLAGS_SKIP_SW)))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 690) return false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 691)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 692) return true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 693) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 694)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 695) static inline bool tc_in_hw(u32 flags)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 696) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 697) return (flags & TCA_CLS_FLAGS_IN_HW) ? true : false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 698) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 699)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 700) static inline void
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 701) tc_cls_common_offload_init(struct flow_cls_common_offload *cls_common,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 702) const struct tcf_proto *tp, u32 flags,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 703) struct netlink_ext_ack *extack)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 704) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 705) cls_common->chain_index = tp->chain->index;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 706) cls_common->protocol = tp->protocol;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 707) cls_common->prio = tp->prio >> 16;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 708) if (tc_skip_sw(flags) || flags & TCA_CLS_FLAGS_VERBOSE)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 709) cls_common->extack = extack;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 710) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 711)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 712) #if IS_ENABLED(CONFIG_NET_TC_SKB_EXT)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 713) static inline struct tc_skb_ext *tc_skb_ext_alloc(struct sk_buff *skb)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 714) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 715) struct tc_skb_ext *tc_skb_ext = skb_ext_add(skb, TC_SKB_EXT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 716)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 717) if (tc_skb_ext)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 718) memset(tc_skb_ext, 0, sizeof(*tc_skb_ext));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 719) return tc_skb_ext;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 720) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 721) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 722)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 723) enum tc_matchall_command {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 724) TC_CLSMATCHALL_REPLACE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 725) TC_CLSMATCHALL_DESTROY,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 726) TC_CLSMATCHALL_STATS,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 727) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 728)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 729) struct tc_cls_matchall_offload {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 730) struct flow_cls_common_offload common;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 731) enum tc_matchall_command command;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 732) struct flow_rule *rule;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 733) struct flow_stats stats;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 734) unsigned long cookie;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 735) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 736)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 737) enum tc_clsbpf_command {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 738) TC_CLSBPF_OFFLOAD,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 739) TC_CLSBPF_STATS,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 740) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 741)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 742) struct tc_cls_bpf_offload {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 743) struct flow_cls_common_offload common;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 744) enum tc_clsbpf_command command;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 745) struct tcf_exts *exts;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 746) struct bpf_prog *prog;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 747) struct bpf_prog *oldprog;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 748) const char *name;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 749) bool exts_integrated;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 750) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 751)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 752) struct tc_mqprio_qopt_offload {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 753) /* struct tc_mqprio_qopt must always be the first element */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 754) struct tc_mqprio_qopt qopt;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 755) u16 mode;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 756) u16 shaper;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 757) u32 flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 758) u64 min_rate[TC_QOPT_MAX_QUEUE];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 759) u64 max_rate[TC_QOPT_MAX_QUEUE];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 760) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 761)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 762) /* This structure holds cookie structure that is passed from user
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 763) * to the kernel for actions and classifiers
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 764) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 765) struct tc_cookie {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 766) u8 *data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 767) u32 len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 768) struct rcu_head rcu;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 769) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 770)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 771) struct tc_qopt_offload_stats {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 772) struct gnet_stats_basic_packed *bstats;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 773) struct gnet_stats_queue *qstats;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 774) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 775)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 776) enum tc_mq_command {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 777) TC_MQ_CREATE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 778) TC_MQ_DESTROY,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 779) TC_MQ_STATS,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 780) TC_MQ_GRAFT,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 781) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 782)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 783) struct tc_mq_opt_offload_graft_params {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 784) unsigned long queue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 785) u32 child_handle;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 786) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 787)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 788) struct tc_mq_qopt_offload {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 789) enum tc_mq_command command;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 790) u32 handle;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 791) union {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 792) struct tc_qopt_offload_stats stats;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 793) struct tc_mq_opt_offload_graft_params graft_params;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 794) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 795) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 796)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 797) enum tc_red_command {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 798) TC_RED_REPLACE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 799) TC_RED_DESTROY,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 800) TC_RED_STATS,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 801) TC_RED_XSTATS,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 802) TC_RED_GRAFT,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 803) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 804)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 805) struct tc_red_qopt_offload_params {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 806) u32 min;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 807) u32 max;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 808) u32 probability;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 809) u32 limit;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 810) bool is_ecn;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 811) bool is_harddrop;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 812) bool is_nodrop;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 813) struct gnet_stats_queue *qstats;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 814) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 815)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 816) struct tc_red_qopt_offload {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 817) enum tc_red_command command;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 818) u32 handle;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 819) u32 parent;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 820) union {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 821) struct tc_red_qopt_offload_params set;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 822) struct tc_qopt_offload_stats stats;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 823) struct red_stats *xstats;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 824) u32 child_handle;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 825) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 826) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 827)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 828) enum tc_gred_command {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 829) TC_GRED_REPLACE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 830) TC_GRED_DESTROY,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 831) TC_GRED_STATS,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 832) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 833)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 834) struct tc_gred_vq_qopt_offload_params {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 835) bool present;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 836) u32 limit;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 837) u32 prio;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 838) u32 min;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 839) u32 max;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 840) bool is_ecn;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 841) bool is_harddrop;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 842) u32 probability;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 843) /* Only need backlog, see struct tc_prio_qopt_offload_params */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 844) u32 *backlog;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 845) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 846)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 847) struct tc_gred_qopt_offload_params {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 848) bool grio_on;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 849) bool wred_on;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 850) unsigned int dp_cnt;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 851) unsigned int dp_def;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 852) struct gnet_stats_queue *qstats;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 853) struct tc_gred_vq_qopt_offload_params tab[MAX_DPs];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 854) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 855)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 856) struct tc_gred_qopt_offload_stats {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 857) struct gnet_stats_basic_packed bstats[MAX_DPs];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 858) struct gnet_stats_queue qstats[MAX_DPs];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 859) struct red_stats *xstats[MAX_DPs];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 860) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 861)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 862) struct tc_gred_qopt_offload {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 863) enum tc_gred_command command;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 864) u32 handle;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 865) u32 parent;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 866) union {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 867) struct tc_gred_qopt_offload_params set;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 868) struct tc_gred_qopt_offload_stats stats;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 869) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 870) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 871)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 872) enum tc_prio_command {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 873) TC_PRIO_REPLACE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 874) TC_PRIO_DESTROY,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 875) TC_PRIO_STATS,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 876) TC_PRIO_GRAFT,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 877) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 878)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 879) struct tc_prio_qopt_offload_params {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 880) int bands;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 881) u8 priomap[TC_PRIO_MAX + 1];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 882) /* At the point of un-offloading the Qdisc, the reported backlog and
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 883) * qlen need to be reduced by the portion that is in HW.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 884) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 885) struct gnet_stats_queue *qstats;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 886) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 887)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 888) struct tc_prio_qopt_offload_graft_params {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 889) u8 band;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 890) u32 child_handle;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 891) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 892)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 893) struct tc_prio_qopt_offload {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 894) enum tc_prio_command command;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 895) u32 handle;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 896) u32 parent;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 897) union {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 898) struct tc_prio_qopt_offload_params replace_params;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 899) struct tc_qopt_offload_stats stats;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 900) struct tc_prio_qopt_offload_graft_params graft_params;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 901) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 902) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 903)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 904) enum tc_root_command {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 905) TC_ROOT_GRAFT,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 906) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 907)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 908) struct tc_root_qopt_offload {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 909) enum tc_root_command command;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 910) u32 handle;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 911) bool ingress;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 912) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 913)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 914) enum tc_ets_command {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 915) TC_ETS_REPLACE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 916) TC_ETS_DESTROY,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 917) TC_ETS_STATS,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 918) TC_ETS_GRAFT,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 919) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 920)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 921) struct tc_ets_qopt_offload_replace_params {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 922) unsigned int bands;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 923) u8 priomap[TC_PRIO_MAX + 1];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 924) unsigned int quanta[TCQ_ETS_MAX_BANDS]; /* 0 for strict bands. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 925) unsigned int weights[TCQ_ETS_MAX_BANDS];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 926) struct gnet_stats_queue *qstats;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 927) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 928)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 929) struct tc_ets_qopt_offload_graft_params {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 930) u8 band;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 931) u32 child_handle;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 932) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 933)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 934) struct tc_ets_qopt_offload {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 935) enum tc_ets_command command;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 936) u32 handle;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 937) u32 parent;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 938) union {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 939) struct tc_ets_qopt_offload_replace_params replace_params;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 940) struct tc_qopt_offload_stats stats;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 941) struct tc_ets_qopt_offload_graft_params graft_params;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 942) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 943) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 944)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 945) enum tc_tbf_command {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 946) TC_TBF_REPLACE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 947) TC_TBF_DESTROY,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 948) TC_TBF_STATS,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 949) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 950)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 951) struct tc_tbf_qopt_offload_replace_params {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 952) struct psched_ratecfg rate;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 953) u32 max_size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 954) struct gnet_stats_queue *qstats;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 955) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 956)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 957) struct tc_tbf_qopt_offload {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 958) enum tc_tbf_command command;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 959) u32 handle;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 960) u32 parent;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 961) union {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 962) struct tc_tbf_qopt_offload_replace_params replace_params;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 963) struct tc_qopt_offload_stats stats;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 964) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 965) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 966)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 967) enum tc_fifo_command {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 968) TC_FIFO_REPLACE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 969) TC_FIFO_DESTROY,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 970) TC_FIFO_STATS,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 971) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 972)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 973) struct tc_fifo_qopt_offload {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 974) enum tc_fifo_command command;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 975) u32 handle;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 976) u32 parent;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 977) union {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 978) struct tc_qopt_offload_stats stats;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 979) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 980) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 981)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 982) #endif