^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1) // SPDX-License-Identifier: GPL-2.0-or-later
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3) * net/sched/ematch.c Extended Match API
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5) * Authors: Thomas Graf <tgraf@suug.ch>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7) * ==========================================================================
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9) * An extended match (ematch) is a small classification tool not worth
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10) * writing a full classifier for. Ematches can be interconnected to form
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11) * a logic expression and get attached to classifiers to extend their
 * functionality.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14) * The userspace part transforms the logic expressions into an array
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15) * consisting of multiple sequences of interconnected ematches separated
 * by markers. Precedence is implemented by a special ematch kind
 * which references a sequence beyond the marker of the current sequence;
 * the current position in the sequence is pushed onto a stack and then
 * overwritten by the position referenced in the special ematch. Matching
 * continues in the new sequence until a marker is reached, causing the
 * position to be restored from the stack.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 22) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 23) * Example:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 24) * A AND (B1 OR B2) AND C AND D
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 25) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 26) * ------->-PUSH-------
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 27) * -->-- / -->-- \ -->--
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 28) * / \ / / \ \ / \
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 29) * +-------+-------+-------+-------+-------+--------+
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 30) * | A AND | B AND | C AND | D END | B1 OR | B2 END |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 31) * +-------+-------+-------+-------+-------+--------+
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 32) * \ /
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 33) * --------<-POP---------
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 34) *
 * where B is a virtual ematch referencing the sequence starting with B1.
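 *
 * To make the walk concrete, consider a packet for which A, C and D
 * match, B1 does not match but B2 does: A is evaluated and matches, the
 * virtual ematch B pushes its own position and jumps to B1, B1 fails
 * but is OR-linked so B2 is tried and matches, the END marker pops the
 * stored position, B's AND relation is satisfied, and matching resumes
 * with C and finally D.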
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 36) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 37) * ==========================================================================
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 38) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 39) * How to write an ematch in 60 seconds
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 40) * ------------------------------------
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 41) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 42) * 1) Provide a matcher function:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 43) * static int my_match(struct sk_buff *skb, struct tcf_ematch *m,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 44) * struct tcf_pkt_info *info)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 45) * {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 46) * struct mydata *d = (struct mydata *) m->data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 47) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 48) * if (...matching goes here...)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 49) * return 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 50) * else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 51) * return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 52) * }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 53) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 54) * 2) Fill out a struct tcf_ematch_ops:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 55) * static struct tcf_ematch_ops my_ops = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 56) * .kind = unique id,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 57) * .datalen = sizeof(struct mydata),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 58) * .match = my_match,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 59) * .owner = THIS_MODULE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 60) * };
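 *
 *    Optional callbacks such as .change, .destroy and .dump (all of them
 *    used further down in this file) may be filled in as well whenever
 *    the default copy-based handling of the ematch data is not enough.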
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 61) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 62) * 3) Register/Unregister your ematch:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 63) * static int __init init_my_ematch(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 64) * {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 65) * return tcf_em_register(&my_ops);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 66) * }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 67) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 68) * static void __exit exit_my_ematch(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 69) * {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 70) * tcf_em_unregister(&my_ops);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 71) * }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 72) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 73) * module_init(init_my_ematch);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 74) * module_exit(exit_my_ematch);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 75) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 76) * 4) By now you should have two more seconds left, barely enough to
 *    open up a beer and watch the compilation go by.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 78) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 79)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 80) #include <linux/module.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 81) #include <linux/slab.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 82) #include <linux/types.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 83) #include <linux/kernel.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 84) #include <linux/errno.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 85) #include <linux/rtnetlink.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 86) #include <linux/skbuff.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 87) #include <net/pkt_cls.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 88)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 89) static LIST_HEAD(ematch_ops);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 90) static DEFINE_RWLOCK(ematch_mod_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 91)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 92) static struct tcf_ematch_ops *tcf_em_lookup(u16 kind)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 93) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 94) struct tcf_ematch_ops *e = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 95)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 96) read_lock(&ematch_mod_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 97) list_for_each_entry(e, &ematch_ops, link) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 98) if (kind == e->kind) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 99) if (!try_module_get(e->owner))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 100) e = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 101) read_unlock(&ematch_mod_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 102) return e;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 103) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 104) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 105) read_unlock(&ematch_mod_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 106)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 107) return NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 108) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 109)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 110) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 111) * tcf_em_register - register an extended match
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 112) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 113) * @ops: ematch operations lookup table
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 114) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 115) * This function must be called by ematches to announce their presence.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 116) * The given @ops must have kind set to a unique identifier and the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 117) * callback match() must be implemented. All other callbacks are optional
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 118) * and a fallback implementation is used instead.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 119) *
 * Returns -EEXIST if an ematch of the same kind has already been registered.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 121) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 122) int tcf_em_register(struct tcf_ematch_ops *ops)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 123) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 124) int err = -EEXIST;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 125) struct tcf_ematch_ops *e;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 126)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 127) if (ops->match == NULL)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 128) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 129)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 130) write_lock(&ematch_mod_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 131) list_for_each_entry(e, &ematch_ops, link)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 132) if (ops->kind == e->kind)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 133) goto errout;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 134)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 135) list_add_tail(&ops->link, &ematch_ops);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 136) err = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 137) errout:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 138) write_unlock(&ematch_mod_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 139) return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 140) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 141) EXPORT_SYMBOL(tcf_em_register);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 142)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 143) /**
 * tcf_em_unregister - unregister an extended match
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 145) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 146) * @ops: ematch operations lookup table
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 147) *
 * This function must be called by ematches to announce their disappearance,
 * for example when the module gets unloaded. The @ops parameter must be
 * the same as the one used for registration.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 153) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 154) void tcf_em_unregister(struct tcf_ematch_ops *ops)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 155) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 156) write_lock(&ematch_mod_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 157) list_del(&ops->link);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 158) write_unlock(&ematch_mod_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 159) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 160) EXPORT_SYMBOL(tcf_em_unregister);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 161)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 162) static inline struct tcf_ematch *tcf_em_get_match(struct tcf_ematch_tree *tree,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 163) int index)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 164) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 165) return &tree->matches[index];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 166) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 167)
static int tcf_em_validate(struct tcf_proto *tp,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 170) struct tcf_ematch_tree_hdr *tree_hdr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 171) struct tcf_ematch *em, struct nlattr *nla, int idx)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 172) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 173) int err = -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 174) struct tcf_ematch_hdr *em_hdr = nla_data(nla);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 175) int data_len = nla_len(nla) - sizeof(*em_hdr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 176) void *data = (void *) em_hdr + sizeof(*em_hdr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 177) struct net *net = tp->chain->block->net;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 178)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 179) if (!TCF_EM_REL_VALID(em_hdr->flags))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 180) goto errout;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 181)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 182) if (em_hdr->kind == TCF_EM_CONTAINER) {
		/* The special ematch called "container" carries an index
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 184) * referencing an external ematch sequence.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 185) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 186) u32 ref;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 187)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 188) if (data_len < sizeof(ref))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 189) goto errout;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 190) ref = *(u32 *) data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 191)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 192) if (ref >= tree_hdr->nmatches)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 193) goto errout;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 194)
		/* We do not allow backward jumps to avoid loops; jumps
		 * to our own position are of course illegal as well.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 197) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 198) if (ref <= idx)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 199) goto errout;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 200)
		em->data = ref;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 203) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 204) /* Note: This lookup will increase the module refcnt
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 205) * of the ematch module referenced. In case of a failure,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 206) * a destroy function is called by the underlying layer
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 207) * which automatically releases the reference again, therefore
		 * the module MUST NOT be released here under any circumstances.
		 * Be aware that the destroy function assumes the module is
		 * held as long as the ops field is non-NULL.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 211) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 212) em->ops = tcf_em_lookup(em_hdr->kind);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 213)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 214) if (em->ops == NULL) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 215) err = -ENOENT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 216) #ifdef CONFIG_MODULES
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 217) __rtnl_unlock();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 218) request_module("ematch-kind-%u", em_hdr->kind);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 219) rtnl_lock();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 220) em->ops = tcf_em_lookup(em_hdr->kind);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 221) if (em->ops) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 222) /* We dropped the RTNL mutex in order to
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 223) * perform the module load. Tell the caller
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 224) * to replay the request.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 225) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 226) module_put(em->ops->owner);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 227) em->ops = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 228) err = -EAGAIN;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 229) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 230) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 231) goto errout;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 232) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 233)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 234) /* ematch module provides expected length of data, so we
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 235) * can do a basic sanity check.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 236) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 237) if (em->ops->datalen && data_len < em->ops->datalen)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 238) goto errout;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 239)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 240) if (em->ops->change) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 241) err = -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 242) if (em_hdr->flags & TCF_EM_SIMPLE)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 243) goto errout;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 244) err = em->ops->change(net, data, data_len, em);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 245) if (err < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 246) goto errout;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 247) } else if (data_len > 0) {
			/* ematch module doesn't provide its own change
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 249) * procedure and expects us to allocate and copy
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 250) * the ematch data.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 251) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 252) * TCF_EM_SIMPLE may be specified stating that the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 253) * data only consists of a u32 integer and the module
			 * does not expect a memory reference but rather
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 255) * the value carried.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 256) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 257) if (em_hdr->flags & TCF_EM_SIMPLE) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 258) if (data_len < sizeof(u32))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 259) goto errout;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 260) em->data = *(u32 *) data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 261) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 262) void *v = kmemdup(data, data_len, GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 263) if (v == NULL) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 264) err = -ENOBUFS;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 265) goto errout;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 266) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 267) em->data = (unsigned long) v;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 268) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 269) em->datalen = data_len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 270) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 271) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 272)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 273) em->matchid = em_hdr->matchid;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 274) em->flags = em_hdr->flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 275) em->net = net;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 276)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 277) err = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 278) errout:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 279) return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 280) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 281)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 282) static const struct nla_policy em_policy[TCA_EMATCH_TREE_MAX + 1] = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 283) [TCA_EMATCH_TREE_HDR] = { .len = sizeof(struct tcf_ematch_tree_hdr) },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 284) [TCA_EMATCH_TREE_LIST] = { .type = NLA_NESTED },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 285) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 286)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 287) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 288) * tcf_em_tree_validate - validate ematch config TLV and build ematch tree
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 289) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 290) * @tp: classifier kind handle
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 291) * @nla: ematch tree configuration TLV
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 292) * @tree: destination ematch tree variable to store the resulting
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 293) * ematch tree.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 294) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 295) * This function validates the given configuration TLV @nla and builds an
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 296) * ematch tree in @tree. The resulting tree must later be copied into
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 297) * the private classifier data using tcf_em_tree_change(). You MUST NOT
 * provide the ematch tree variable of the private classifier data directly;
 * otherwise the changes would not be locked properly.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 300) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 301) * Returns a negative error code if the configuration TLV contains errors.
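 *
 * A rough sketch of the intended calling sequence in a classifier's
 * change() operation (the filter pointer "f" and the TCA_MYCLS_EMATCHES
 * attribute are made-up names, used only for illustration):
 *
 *	struct tcf_ematch_tree tree;
 *	int err;
 *
 *	err = tcf_em_tree_validate(tp, tb[TCA_MYCLS_EMATCHES], &tree);
 *	if (err < 0)
 *		return err;
 *
 *	tcf_em_tree_change(tp, &f->ematches, &tree);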
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 302) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 303) int tcf_em_tree_validate(struct tcf_proto *tp, struct nlattr *nla,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 304) struct tcf_ematch_tree *tree)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 305) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 306) int idx, list_len, matches_len, err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 307) struct nlattr *tb[TCA_EMATCH_TREE_MAX + 1];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 308) struct nlattr *rt_match, *rt_hdr, *rt_list;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 309) struct tcf_ematch_tree_hdr *tree_hdr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 310) struct tcf_ematch *em;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 311)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 312) memset(tree, 0, sizeof(*tree));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 313) if (!nla)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 314) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 315)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 316) err = nla_parse_nested_deprecated(tb, TCA_EMATCH_TREE_MAX, nla,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 317) em_policy, NULL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 318) if (err < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 319) goto errout;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 320)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 321) err = -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 322) rt_hdr = tb[TCA_EMATCH_TREE_HDR];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 323) rt_list = tb[TCA_EMATCH_TREE_LIST];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 324)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 325) if (rt_hdr == NULL || rt_list == NULL)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 326) goto errout;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 327)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 328) tree_hdr = nla_data(rt_hdr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 329) memcpy(&tree->hdr, tree_hdr, sizeof(*tree_hdr));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 330)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 331) rt_match = nla_data(rt_list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 332) list_len = nla_len(rt_list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 333) matches_len = tree_hdr->nmatches * sizeof(*em);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 334)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 335) tree->matches = kzalloc(matches_len, GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 336) if (tree->matches == NULL)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 337) goto errout;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 338)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 339) /* We do not use nla_parse_nested here because the maximum
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 340) * number of attributes is unknown. This saves us the allocation
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 341) * for a tb buffer which would serve no purpose at all.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 342) *
	 * The array of rt attributes is parsed in the order they are
	 * provided; their type must increase monotonically from 1 to n.
	 * Even if it does not serve any real purpose, failing to stick
	 * to this policy will result in a parsing failure.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 347) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 348) for (idx = 0; nla_ok(rt_match, list_len); idx++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 349) err = -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 350)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 351) if (rt_match->nla_type != (idx + 1))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 352) goto errout_abort;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 353)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 354) if (idx >= tree_hdr->nmatches)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 355) goto errout_abort;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 356)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 357) if (nla_len(rt_match) < sizeof(struct tcf_ematch_hdr))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 358) goto errout_abort;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 359)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 360) em = tcf_em_get_match(tree, idx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 361)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 362) err = tcf_em_validate(tp, tree_hdr, em, rt_match, idx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 363) if (err < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 364) goto errout_abort;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 365)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 366) rt_match = nla_next(rt_match, &list_len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 367) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 368)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 369) /* Check if the number of matches provided by userspace actually
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 370) * complies with the array of matches. The number was used for
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 371) * the validation of references and a mismatch could lead to
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 372) * undefined references during the matching process.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 373) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 374) if (idx != tree_hdr->nmatches) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 375) err = -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 376) goto errout_abort;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 377) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 378)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 379) err = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 380) errout:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 381) return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 382)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 383) errout_abort:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 384) tcf_em_tree_destroy(tree);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 385) return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 386) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 387) EXPORT_SYMBOL(tcf_em_tree_validate);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 388)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 389) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 390) * tcf_em_tree_destroy - destroy an ematch tree
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 391) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 392) * @tree: ematch tree to be deleted
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 393) *
 * This function destroys an ematch tree previously created by
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 395) * tcf_em_tree_validate()/tcf_em_tree_change(). You must ensure that
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 396) * the ematch tree is not in use before calling this function.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 397) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 398) void tcf_em_tree_destroy(struct tcf_ematch_tree *tree)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 399) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 400) int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 401)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 402) if (tree->matches == NULL)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 403) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 404)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 405) for (i = 0; i < tree->hdr.nmatches; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 406) struct tcf_ematch *em = tcf_em_get_match(tree, i);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 407)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 408) if (em->ops) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 409) if (em->ops->destroy)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 410) em->ops->destroy(em);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 411) else if (!tcf_em_is_simple(em))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 412) kfree((void *) em->data);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 413) module_put(em->ops->owner);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 414) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 415) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 416)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 417) tree->hdr.nmatches = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 418) kfree(tree->matches);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 419) tree->matches = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 420) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 421) EXPORT_SYMBOL(tcf_em_tree_destroy);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 422)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 423) /**
 * tcf_em_tree_dump - dump an ematch tree into an rtnl message
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 425) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 426) * @skb: skb holding the rtnl message
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 427) * @tree: ematch tree to be dumped
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 428) * @tlv: TLV type to be used to encapsulate the tree
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 429) *
 * This function dumps an ematch tree into an rtnl message. It is valid to
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 431) * call this function while the ematch tree is in use.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 432) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 433) * Returns -1 if the skb tailroom is insufficient.
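 *
 * A typical caller is a classifier's dump() callback, along the lines of
 * (the attribute name is hypothetical):
 *
 *	if (tcf_em_tree_dump(skb, &f->ematches, TCA_MYCLS_EMATCHES) < 0)
 *		goto nla_put_failure;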
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 434) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 435) int tcf_em_tree_dump(struct sk_buff *skb, struct tcf_ematch_tree *tree, int tlv)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 436) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 437) int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 438) u8 *tail;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 439) struct nlattr *top_start;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 440) struct nlattr *list_start;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 441)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 442) top_start = nla_nest_start_noflag(skb, tlv);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 443) if (top_start == NULL)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 444) goto nla_put_failure;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 445)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 446) if (nla_put(skb, TCA_EMATCH_TREE_HDR, sizeof(tree->hdr), &tree->hdr))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 447) goto nla_put_failure;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 448)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 449) list_start = nla_nest_start_noflag(skb, TCA_EMATCH_TREE_LIST);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 450) if (list_start == NULL)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 451) goto nla_put_failure;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 452)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 453) tail = skb_tail_pointer(skb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 454) for (i = 0; i < tree->hdr.nmatches; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 455) struct nlattr *match_start = (struct nlattr *)tail;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 456) struct tcf_ematch *em = tcf_em_get_match(tree, i);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 457) struct tcf_ematch_hdr em_hdr = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 458) .kind = em->ops ? em->ops->kind : TCF_EM_CONTAINER,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 459) .matchid = em->matchid,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 460) .flags = em->flags
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 461) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 462)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 463) if (nla_put(skb, i + 1, sizeof(em_hdr), &em_hdr))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 464) goto nla_put_failure;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 465)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 466) if (em->ops && em->ops->dump) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 467) if (em->ops->dump(skb, em) < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 468) goto nla_put_failure;
		} else if (tcf_em_is_container(em) || tcf_em_is_simple(em)) {
			u32 u = em->data;

			if (nla_put_nohdr(skb, sizeof(u), &u))
				goto nla_put_failure;
		} else if (em->datalen > 0) {
			if (nla_put_nohdr(skb, em->datalen, (void *) em->data))
				goto nla_put_failure;
		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 474)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 475) tail = skb_tail_pointer(skb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 476) match_start->nla_len = tail - (u8 *)match_start;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 477) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 478)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 479) nla_nest_end(skb, list_start);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 480) nla_nest_end(skb, top_start);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 481)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 482) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 483)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 484) nla_put_failure:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 485) return -1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 486) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 487) EXPORT_SYMBOL(tcf_em_tree_dump);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 488)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 489) static inline int tcf_em_match(struct sk_buff *skb, struct tcf_ematch *em,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 490) struct tcf_pkt_info *info)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 491) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 492) int r = em->ops->match(skb, em, info);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 493)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 494) return tcf_em_is_inverted(em) ? !r : r;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 495) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 496)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 497) /* Do not use this function directly, use tcf_em_tree_match instead */
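/* Sketch of the intended call site (the filter list and field names are
 * hypothetical): a classifier walks its filters in the classify path and
 * evaluates each tree via the tcf_em_tree_match() wrapper:
 *
 *	list_for_each_entry(f, &head->flist, link) {
 *		if (!tcf_em_tree_match(skb, &f->ematches, NULL))
 *			continue;
 *		... the filter matched, apply its result ...
 *	}
 */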
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 498) int __tcf_em_tree_match(struct sk_buff *skb, struct tcf_ematch_tree *tree,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 499) struct tcf_pkt_info *info)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 500) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 501) int stackp = 0, match_idx = 0, res = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 502) struct tcf_ematch *cur_match;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 503) int stack[CONFIG_NET_EMATCH_STACK];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 504)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 505) proceed:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 506) while (match_idx < tree->hdr.nmatches) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 507) cur_match = tcf_em_get_match(tree, match_idx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 508)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 509) if (tcf_em_is_container(cur_match)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 510) if (unlikely(stackp >= CONFIG_NET_EMATCH_STACK))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 511) goto stack_overflow;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 512)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 513) stack[stackp++] = match_idx;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 514) match_idx = cur_match->data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 515) goto proceed;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 516) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 517)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 518) res = tcf_em_match(skb, cur_match, info);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 519)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 520) if (tcf_em_early_end(cur_match, res))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 521) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 522)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 523) match_idx++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 524) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 525)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 526) pop_stack:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 527) if (stackp > 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 528) match_idx = stack[--stackp];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 529) cur_match = tcf_em_get_match(tree, match_idx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 530)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 531) if (tcf_em_is_inverted(cur_match))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 532) res = !res;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 533)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 534) if (tcf_em_early_end(cur_match, res)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 535) goto pop_stack;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 536) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 537) match_idx++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 538) goto proceed;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 539) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 540) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 541)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 542) return res;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 543)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 544) stack_overflow:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 545) net_warn_ratelimited("tc ematch: local stack overflow, increase NET_EMATCH_STACK\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 546) return -1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 547) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 548) EXPORT_SYMBOL(__tcf_em_tree_match);