// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * net/sched/cls_fw.c	Classifier mapping ipchains' fwmark to traffic class.
 *
 * Authors:	Alexey Kuznetsov, <kuznet@ms2.inr.ac.ru>
 *
 * Changes:
 * Karlis Peisenieks <karlis@mt.lv> : 990415 : fw_walk off by one
 * Karlis Peisenieks <karlis@mt.lv> : 990415 : fw_delete killed all the filter (and kernel).
 * Alex <alex@pilotsoft.com> : 2004xxyy: Added Action extension
 */
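/*
 * Typical usage, roughly: packets get an skb mark from netfilter (or via
 * SO_MARK) and that mark is matched against the filter handle, e.g.
 * something like
 *
 *	iptables -t mangle -A PREROUTING -i eth0 -j MARK --set-mark 6
 *	tc filter add dev eth0 parent 1: protocol ip prio 1 handle 6 fw classid 1:30
 *
 * The exact command syntax depends on the iptables/iproute2 versions in
 * use; the lines above are only an illustration.
 */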

#include <linux/module.h>
#include <linux/slab.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/errno.h>
#include <linux/skbuff.h>
#include <net/netlink.h>
#include <net/act_api.h>
#include <net/pkt_cls.h>
#include <net/sch_generic.h>

#define HTSIZE 256

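/*
 * Per-classifier root: a 256-bucket chained hash table of fw_filter
 * entries keyed by (skb->mark & mask).  The head is freed with
 * kfree_rcu() so readers walking it under rcu_read_lock_bh() are safe.
 */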
struct fw_head {
	u32 mask;
	struct fw_filter __rcu *ht[HTSIZE];
	struct rcu_head rcu;
};

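/*
 * One filter instance, matching a single (masked) mark value 'id' within
 * its hash bucket.  'exts' carries optional actions/policing, 'ifindex'
 * an optional incoming-device match, and 'rwork' defers the final free
 * past an RCU grace period (see fw_delete_filter_work()).
 */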
struct fw_filter {
	struct fw_filter __rcu *next;
	u32 id;
	struct tcf_result res;
	int ifindex;
	struct tcf_exts exts;
	struct tcf_proto *tp;
	struct rcu_work rwork;
};

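/*
 * Fold the 32-bit mark into a bucket index by XOR-ing the upper bytes
 * into the lower ones before taking it modulo HTSIZE, so all four bytes
 * of the mark influence the bucket.  For example, handle 0x12345678:
 *
 *	0x12345678 ^ 0x00001234 = 0x1234444c
 *	0x1234444c ^ 0x00123444 = 0x12267008,  0x12267008 % 256 = 0x08
 */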
static u32 fw_hash(u32 handle)
{
	handle ^= (handle >> 16);
	handle ^= (handle >> 8);
	return handle % HTSIZE;
}

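/*
 * With a hash table present, mask the mark and walk the matching bucket;
 * the first entry whose id matches (and whose optional indev check
 * passes) has its extensions executed and its result returned.  Without
 * a hash table, the "old method" below treats a non-zero mark directly
 * as a classid of the attached qdisc.  Returns -1 if nothing matched.
 */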
static int fw_classify(struct sk_buff *skb, const struct tcf_proto *tp,
		       struct tcf_result *res)
{
	struct fw_head *head = rcu_dereference_bh(tp->root);
	struct fw_filter *f;
	int r;
	u32 id = skb->mark;

	if (head != NULL) {
		id &= head->mask;

		for (f = rcu_dereference_bh(head->ht[fw_hash(id)]); f;
		     f = rcu_dereference_bh(f->next)) {
			if (f->id == id) {
				*res = f->res;
				if (!tcf_match_indev(skb, f->ifindex))
					continue;
				r = tcf_exts_exec(skb, &f->exts, res);
				if (r < 0)
					continue;

				return r;
			}
		}
	} else {
		struct Qdisc *q = tcf_block_q(tp->chain->block);

		/* Old method: classify the packet using its skb mark. */
		if (id && (TC_H_MAJ(id) == 0 ||
			   !(TC_H_MAJ(id ^ q->handle)))) {
			res->classid = id;
			res->class = 0;
			return 0;
		}
	}

	return -1;
}

static void *fw_get(struct tcf_proto *tp, u32 handle)
{
	struct fw_head *head = rtnl_dereference(tp->root);
	struct fw_filter *f;

	if (head == NULL)
		return NULL;

	f = rtnl_dereference(head->ht[fw_hash(handle)]);
	for (; f; f = rtnl_dereference(f->next)) {
		if (f->id == handle)
			return f;
	}
	return NULL;
}

static int fw_init(struct tcf_proto *tp)
{
	/* We don't allocate fw_head here, because in the old method
	 * we don't need it at all.
	 */
	return 0;
}

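/*
 * Teardown helpers: when RCU readers may still be in flight, the free is
 * deferred through tcf_queue_work(), which runs fw_delete_filter_work()
 * after a grace period with RTNL held around the extension cleanup.
 * fw_destroy() falls back to an immediate __fw_delete_filter() when
 * tcf_exts_get_net() fails, i.e. the netns is already being dismantled.
 */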
static void __fw_delete_filter(struct fw_filter *f)
{
	tcf_exts_destroy(&f->exts);
	tcf_exts_put_net(&f->exts);
	kfree(f);
}

static void fw_delete_filter_work(struct work_struct *work)
{
	struct fw_filter *f = container_of(to_rcu_work(work),
					   struct fw_filter,
					   rwork);
	rtnl_lock();
	__fw_delete_filter(f);
	rtnl_unlock();
}

static void fw_destroy(struct tcf_proto *tp, bool rtnl_held,
		       struct netlink_ext_ack *extack)
{
	struct fw_head *head = rtnl_dereference(tp->root);
	struct fw_filter *f;
	int h;

	if (head == NULL)
		return;

	for (h = 0; h < HTSIZE; h++) {
		while ((f = rtnl_dereference(head->ht[h])) != NULL) {
			RCU_INIT_POINTER(head->ht[h],
					 rtnl_dereference(f->next));
			tcf_unbind_filter(tp, &f->res);
			if (tcf_exts_get_net(&f->exts))
				tcf_queue_work(&f->rwork, fw_delete_filter_work);
			else
				__fw_delete_filter(f);
		}
	}
	kfree_rcu(head, rcu);
}

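/*
 * Unlink one filter from its bucket and queue it for deferred
 * destruction.  *last is set when no filter remains in any bucket,
 * telling the caller that the whole tcf_proto can go away.
 */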
static int fw_delete(struct tcf_proto *tp, void *arg, bool *last,
		     bool rtnl_held, struct netlink_ext_ack *extack)
{
	struct fw_head *head = rtnl_dereference(tp->root);
	struct fw_filter *f = arg;
	struct fw_filter __rcu **fp;
	struct fw_filter *pfp;
	int ret = -EINVAL;
	int h;

	if (head == NULL || f == NULL)
		goto out;

	fp = &head->ht[fw_hash(f->id)];

	for (pfp = rtnl_dereference(*fp); pfp;
	     fp = &pfp->next, pfp = rtnl_dereference(*fp)) {
		if (pfp == f) {
			RCU_INIT_POINTER(*fp, rtnl_dereference(f->next));
			tcf_unbind_filter(tp, &f->res);
			tcf_exts_get_net(&f->exts);
			tcf_queue_work(&f->rwork, fw_delete_filter_work);
			ret = 0;
			break;
		}
	}

	*last = true;
	for (h = 0; h < HTSIZE; h++) {
		if (rcu_access_pointer(head->ht[h])) {
			*last = false;
			break;
		}
	}

out:
	return ret;
}

static const struct nla_policy fw_policy[TCA_FW_MAX + 1] = {
	[TCA_FW_CLASSID]	= { .type = NLA_U32 },
	[TCA_FW_INDEV]		= { .type = NLA_STRING, .len = IFNAMSIZ },
	[TCA_FW_MASK]		= { .type = NLA_U32 },
};

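/*
 * Validate and apply netlink attributes on a new or replacement filter.
 * Note the mask handling: once a head exists its mask is fixed, so
 * TCA_FW_MASK must match head->mask exactly, and omitting it is only
 * accepted when the mask is the default 0xFFFFFFFF.
 */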
static int fw_set_parms(struct net *net, struct tcf_proto *tp,
			struct fw_filter *f, struct nlattr **tb,
			struct nlattr **tca, unsigned long base, bool ovr,
			struct netlink_ext_ack *extack)
{
	struct fw_head *head = rtnl_dereference(tp->root);
	u32 mask;
	int err;

	err = tcf_exts_validate(net, tp, tb, tca[TCA_RATE], &f->exts, ovr,
				true, extack);
	if (err < 0)
		return err;

	if (tb[TCA_FW_CLASSID]) {
		f->res.classid = nla_get_u32(tb[TCA_FW_CLASSID]);
		tcf_bind_filter(tp, &f->res, base);
	}

	if (tb[TCA_FW_INDEV]) {
		int ret;
		ret = tcf_change_indev(net, tb[TCA_FW_INDEV], extack);
		if (ret < 0)
			return ret;
		f->ifindex = ret;
	}

	err = -EINVAL;
	if (tb[TCA_FW_MASK]) {
		mask = nla_get_u32(tb[TCA_FW_MASK]);
		if (mask != head->mask)
			return err;
	} else if (head->mask != 0xFFFFFFFF)
		return err;

	return 0;
}

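/*
 * Create or replace a filter.  Replacement is copy-based: a new
 * fw_filter is built from the old one plus the new attributes, spliced
 * into the bucket with rcu_assign_pointer(), and the old entry is queued
 * for RCU-deferred destruction.  Creation allocates the hash table head
 * lazily on first insert, taking the mask from TCA_FW_MASK (default
 * 0xFFFFFFFF).
 */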
static int fw_change(struct net *net, struct sk_buff *in_skb,
		     struct tcf_proto *tp, unsigned long base,
		     u32 handle, struct nlattr **tca, void **arg,
		     bool ovr, bool rtnl_held,
		     struct netlink_ext_ack *extack)
{
	struct fw_head *head = rtnl_dereference(tp->root);
	struct fw_filter *f = *arg;
	struct nlattr *opt = tca[TCA_OPTIONS];
	struct nlattr *tb[TCA_FW_MAX + 1];
	int err;

	if (!opt)
		return handle ? -EINVAL : 0; /* Succeed if it is old method. */

	err = nla_parse_nested_deprecated(tb, TCA_FW_MAX, opt, fw_policy,
					  NULL);
	if (err < 0)
		return err;

	if (f) {
		struct fw_filter *pfp, *fnew;
		struct fw_filter __rcu **fp;

		if (f->id != handle && handle)
			return -EINVAL;

		fnew = kzalloc(sizeof(struct fw_filter), GFP_KERNEL);
		if (!fnew)
			return -ENOBUFS;

		fnew->id = f->id;
		fnew->res = f->res;
		fnew->ifindex = f->ifindex;
		fnew->tp = f->tp;

		err = tcf_exts_init(&fnew->exts, net, TCA_FW_ACT,
				    TCA_FW_POLICE);
		if (err < 0) {
			kfree(fnew);
			return err;
		}

		err = fw_set_parms(net, tp, fnew, tb, tca, base, ovr, extack);
		if (err < 0) {
			tcf_exts_destroy(&fnew->exts);
			kfree(fnew);
			return err;
		}

		fp = &head->ht[fw_hash(fnew->id)];
		for (pfp = rtnl_dereference(*fp); pfp;
		     fp = &pfp->next, pfp = rtnl_dereference(*fp))
			if (pfp == f)
				break;

		RCU_INIT_POINTER(fnew->next, rtnl_dereference(pfp->next));
		rcu_assign_pointer(*fp, fnew);
		tcf_unbind_filter(tp, &f->res);
		tcf_exts_get_net(&f->exts);
		tcf_queue_work(&f->rwork, fw_delete_filter_work);

		*arg = fnew;
		return err;
	}

	if (!handle)
		return -EINVAL;

	if (!head) {
		u32 mask = 0xFFFFFFFF;
		if (tb[TCA_FW_MASK])
			mask = nla_get_u32(tb[TCA_FW_MASK]);

		head = kzalloc(sizeof(*head), GFP_KERNEL);
		if (!head)
			return -ENOBUFS;
		head->mask = mask;

		rcu_assign_pointer(tp->root, head);
	}

	f = kzalloc(sizeof(struct fw_filter), GFP_KERNEL);
	if (f == NULL)
		return -ENOBUFS;

	err = tcf_exts_init(&f->exts, net, TCA_FW_ACT, TCA_FW_POLICE);
	if (err < 0)
		goto errout;
	f->id = handle;
	f->tp = tp;

	err = fw_set_parms(net, tp, f, tb, tca, base, ovr, extack);
	if (err < 0)
		goto errout;

	RCU_INIT_POINTER(f->next, head->ht[fw_hash(handle)]);
	rcu_assign_pointer(head->ht[fw_hash(handle)], f);

	*arg = f;
	return 0;

errout:
	tcf_exts_destroy(&f->exts);
	kfree(f);
	return err;
}

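/*
 * Iterate all filters for a dump: the first arg->skip entries are
 * skipped (but still counted), every visited entry bumps arg->count,
 * and a negative return from arg->fn() stops the walk.
 */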
static void fw_walk(struct tcf_proto *tp, struct tcf_walker *arg,
		    bool rtnl_held)
{
	struct fw_head *head = rtnl_dereference(tp->root);
	int h;

	if (head == NULL)
		arg->stop = 1;

	if (arg->stop)
		return;

	for (h = 0; h < HTSIZE; h++) {
		struct fw_filter *f;

		for (f = rtnl_dereference(head->ht[h]); f;
		     f = rtnl_dereference(f->next)) {
			if (arg->count < arg->skip) {
				arg->count++;
				continue;
			}
			if (arg->fn(tp, f, arg) < 0) {
				arg->stop = 1;
				return;
			}
			arg->count++;
		}
	}
}

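/*
 * Dump one filter into a netlink message.  All attributes go inside a
 * nested TCA_OPTIONS attribute; on any failure the nest is cancelled
 * with nla_nest_cancel() so the message stays well formed.
 */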
static int fw_dump(struct net *net, struct tcf_proto *tp, void *fh,
		   struct sk_buff *skb, struct tcmsg *t, bool rtnl_held)
{
	struct fw_head *head = rtnl_dereference(tp->root);
	struct fw_filter *f = fh;
	struct nlattr *nest;

	if (f == NULL)
		return skb->len;

	t->tcm_handle = f->id;

	if (!f->res.classid && !tcf_exts_has_actions(&f->exts))
		return skb->len;

	nest = nla_nest_start_noflag(skb, TCA_OPTIONS);
	if (nest == NULL)
		goto nla_put_failure;

	if (f->res.classid &&
	    nla_put_u32(skb, TCA_FW_CLASSID, f->res.classid))
		goto nla_put_failure;
	if (f->ifindex) {
		struct net_device *dev;
		dev = __dev_get_by_index(net, f->ifindex);
		if (dev && nla_put_string(skb, TCA_FW_INDEV, dev->name))
			goto nla_put_failure;
	}
	if (head->mask != 0xFFFFFFFF &&
	    nla_put_u32(skb, TCA_FW_MASK, head->mask))
		goto nla_put_failure;

	if (tcf_exts_dump(skb, &f->exts) < 0)
		goto nla_put_failure;

	nla_nest_end(skb, nest);

	if (tcf_exts_dump_stats(skb, &f->exts) < 0)
		goto nla_put_failure;

	return skb->len;

nla_put_failure:
	nla_nest_cancel(skb, nest);
	return -1;
}

static void fw_bind_class(void *fh, u32 classid, unsigned long cl, void *q,
			  unsigned long base)
{
	struct fw_filter *f = fh;

	if (f && f->res.classid == classid) {
		if (cl)
			__tcf_bind_filter(q, &f->res, base);
		else
			__tcf_unbind_filter(q, &f->res);
	}
}

static struct tcf_proto_ops cls_fw_ops __read_mostly = {
	.kind		= "fw",
	.classify	= fw_classify,
	.init		= fw_init,
	.destroy	= fw_destroy,
	.get		= fw_get,
	.change		= fw_change,
	.delete		= fw_delete,
	.walk		= fw_walk,
	.dump		= fw_dump,
	.bind_class	= fw_bind_class,
	.owner		= THIS_MODULE,
};

static int __init init_fw(void)
{
	return register_tcf_proto_ops(&cls_fw_ops);
}

static void __exit exit_fw(void)
{
	unregister_tcf_proto_ops(&cls_fw_ops);
}

module_init(init_fw)
module_exit(exit_fw)
MODULE_LICENSE("GPL");