// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * net/sched/cls_u32.c	Ugly (or Universal) 32bit key Packet Classifier.
 *
 * Authors:	Alexey Kuznetsov, <kuznet@ms2.inr.ac.ru>
 *
 * The filters are packed to hash tables of key nodes
 * with a set of 32bit key/mask pairs at every node.
 * Nodes reference next level hash tables etc.
 *
 * This scheme is the best universal classifier I managed to
 * invent; it is not super-fast, but it is not slow (provided you
 * program it correctly), and general enough. And its relative
 * speed grows as the number of rules becomes larger.
 *
 * It seems that it represents the best middle point between
 * speed and manageability both by human and by machine.
 *
 * It is especially useful for link sharing combined with QoS;
 * pure RSVP doesn't need such a general approach and can use
 * much simpler (and faster) schemes, such as cls_rsvp.c.
 *
 * nfmark match added by Catalin(ux aka Dino) BOIE <catab at umbrella.ro>
 */
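
/* For orientation only: from user space these tables are typically built
 * with iproute2's tc. A minimal, illustrative sketch (the device, handles
 * and classid below are assumptions, not taken from this file):
 *
 *   tc filter add dev eth0 parent 1:0 protocol ip prio 1 u32 \
 *           match ip dst 192.0.2.0/24 flowid 1:1
 *
 * This creates a single key node whose 32bit value/mask pair is matched
 * against the IPv4 destination word, which is exactly what u32_classify()
 * below walks through.
 */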

#include <linux/module.h>
#include <linux/slab.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/errno.h>
#include <linux/percpu.h>
#include <linux/rtnetlink.h>
#include <linux/skbuff.h>
#include <linux/bitmap.h>
#include <linux/netdevice.h>
#include <linux/hash.h>
#include <net/netlink.h>
#include <net/act_api.h>
#include <net/pkt_cls.h>
#include <linux/idr.h>

struct tc_u_knode {
	struct tc_u_knode __rcu	*next;
	u32			handle;
	struct tc_u_hnode __rcu	*ht_up;
	struct tcf_exts		exts;
	int			ifindex;
	u8			fshift;
	struct tcf_result	res;
	struct tc_u_hnode __rcu	*ht_down;
#ifdef CONFIG_CLS_U32_PERF
	struct tc_u32_pcnt __percpu *pf;
#endif
	u32			flags;
	unsigned int		in_hw_count;
#ifdef CONFIG_CLS_U32_MARK
	u32			val;
	u32			mask;
	u32 __percpu		*pcpu_success;
#endif
	struct rcu_work		rwork;
	/* The 'sel' field MUST be the last field in structure to allow for
	 * tc_u32_keys allocated at end of structure.
	 */
	struct tc_u32_sel	sel;
};

struct tc_u_hnode {
	struct tc_u_hnode __rcu	*next;
	u32			handle;
	u32			prio;
	int			refcnt;
	unsigned int		divisor;
	struct idr		handle_idr;
	bool			is_root;
	struct rcu_head		rcu;
	u32			flags;
	/* The 'ht' field MUST be the last field in structure to allow for
	 * more entries allocated at end of structure.
	 */
	struct tc_u_knode __rcu	*ht[];
};

struct tc_u_common {
	struct tc_u_hnode __rcu	*hlist;
	void			*ptr;
	int			refcnt;
	struct idr		handle_idr;
	struct hlist_node	hnode;
	long			knodes;
};

static inline unsigned int u32_hash_fold(__be32 key,
					 const struct tc_u32_sel *sel,
					 u8 fshift)
{
	unsigned int h = ntohl(key & sel->hmask) >> fshift;

	return h;
}
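
/* A worked example of the fold above, with illustrative values: with
 * hmask = htonl(0x00ff0000), the selector's fshift works out to 16
 * (ffs(0x00ff0000) - 1), so a key of htonl(0x12345678) gives
 * ntohl(key & hmask) = 0x00340000 and 0x00340000 >> 16 = 0x34. The
 * caller then masks this with the table's divisor to pick a bucket.
 */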

static int u32_classify(struct sk_buff *skb, const struct tcf_proto *tp,
			struct tcf_result *res)
{
	struct {
		struct tc_u_knode *knode;
		unsigned int	  off;
	} stack[TC_U32_MAXDEPTH];

	struct tc_u_hnode *ht = rcu_dereference_bh(tp->root);
	unsigned int off = skb_network_offset(skb);
	struct tc_u_knode *n;
	int sdepth = 0;
	int off2 = 0;
	int sel = 0;
#ifdef CONFIG_CLS_U32_PERF
	int j;
#endif
	int i, r;

next_ht:
	n = rcu_dereference_bh(ht->ht[sel]);

next_knode:
	if (n) {
		struct tc_u32_key *key = n->sel.keys;

#ifdef CONFIG_CLS_U32_PERF
		__this_cpu_inc(n->pf->rcnt);
		j = 0;
#endif

		if (tc_skip_sw(n->flags)) {
			n = rcu_dereference_bh(n->next);
			goto next_knode;
		}

#ifdef CONFIG_CLS_U32_MARK
		if ((skb->mark & n->mask) != n->val) {
			n = rcu_dereference_bh(n->next);
			goto next_knode;
		} else {
			__this_cpu_inc(*n->pcpu_success);
		}
#endif

		for (i = n->sel.nkeys; i > 0; i--, key++) {
			int toff = off + key->off + (off2 & key->offmask);
			__be32 *data, hdata;

			if (skb_headroom(skb) + toff > INT_MAX)
				goto out;

			data = skb_header_pointer(skb, toff, 4, &hdata);
			if (!data)
				goto out;
			if ((*data ^ key->val) & key->mask) {
				n = rcu_dereference_bh(n->next);
				goto next_knode;
			}
#ifdef CONFIG_CLS_U32_PERF
			__this_cpu_inc(n->pf->kcnts[j]);
			j++;
#endif
		}

		ht = rcu_dereference_bh(n->ht_down);
		if (!ht) {
check_terminal:
			if (n->sel.flags & TC_U32_TERMINAL) {
				*res = n->res;
				if (!tcf_match_indev(skb, n->ifindex)) {
					n = rcu_dereference_bh(n->next);
					goto next_knode;
				}
#ifdef CONFIG_CLS_U32_PERF
				__this_cpu_inc(n->pf->rhit);
#endif
				r = tcf_exts_exec(skb, &n->exts, res);
				if (r < 0) {
					n = rcu_dereference_bh(n->next);
					goto next_knode;
				}

				return r;
			}
			n = rcu_dereference_bh(n->next);
			goto next_knode;
		}

		/* PUSH */
		if (sdepth >= TC_U32_MAXDEPTH)
			goto deadloop;
		stack[sdepth].knode = n;
		stack[sdepth].off = off;
		sdepth++;

		ht = rcu_dereference_bh(n->ht_down);
		sel = 0;
		if (ht->divisor) {
			__be32 *data, hdata;

			data = skb_header_pointer(skb, off + n->sel.hoff, 4,
						  &hdata);
			if (!data)
				goto out;
			sel = ht->divisor & u32_hash_fold(*data, &n->sel,
							  n->fshift);
		}
		if (!(n->sel.flags & (TC_U32_VAROFFSET | TC_U32_OFFSET | TC_U32_EAT)))
			goto next_ht;

		if (n->sel.flags & (TC_U32_OFFSET | TC_U32_VAROFFSET)) {
			off2 = n->sel.off + 3;
			if (n->sel.flags & TC_U32_VAROFFSET) {
				__be16 *data, hdata;

				data = skb_header_pointer(skb,
							  off + n->sel.offoff,
							  2, &hdata);
				if (!data)
					goto out;
				off2 += ntohs(n->sel.offmask & *data) >>
					n->sel.offshift;
			}
			off2 &= ~3;
		}
		if (n->sel.flags & TC_U32_EAT) {
			off += off2;
			off2 = 0;
		}

		if (off < skb->len)
			goto next_ht;
	}

	/* POP */
	if (sdepth--) {
		n = stack[sdepth].knode;
		ht = rcu_dereference_bh(n->ht_up);
		off = stack[sdepth].off;
		goto check_terminal;
	}
out:
	return -1;

deadloop:
	net_warn_ratelimited("cls_u32: dead loop\n");
	return -1;
}
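
/* Offset handling above, by example (illustrative values): with
 * TC_U32_VAROFFSET set, off2 becomes sel.off + 3 plus the masked and
 * shifted 16bit field read at offoff, rounded down to a multiple of 4
 * (off2 &= ~3). The classic IPv4 header-length trick reads the first
 * two header bytes with offmask = htons(0x0f00) and offshift = 6, so
 * an IHL of 5 yields 0x0500 >> 6 = 20, the offset of the transport
 * header; TC_U32_EAT then folds it into the base offset.
 */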

static struct tc_u_hnode *u32_lookup_ht(struct tc_u_common *tp_c, u32 handle)
{
	struct tc_u_hnode *ht;

	for (ht = rtnl_dereference(tp_c->hlist);
	     ht;
	     ht = rtnl_dereference(ht->next))
		if (ht->handle == handle)
			break;

	return ht;
}

static struct tc_u_knode *u32_lookup_key(struct tc_u_hnode *ht, u32 handle)
{
	unsigned int sel;
	struct tc_u_knode *n = NULL;

	sel = TC_U32_HASH(handle);
	if (sel > ht->divisor)
		goto out;

	for (n = rtnl_dereference(ht->ht[sel]);
	     n;
	     n = rtnl_dereference(n->next))
		if (n->handle == handle)
			break;
out:
	return n;
}

static void *u32_get(struct tcf_proto *tp, u32 handle)
{
	struct tc_u_hnode *ht;
	struct tc_u_common *tp_c = tp->data;

	if (TC_U32_HTID(handle) == TC_U32_ROOT)
		ht = rtnl_dereference(tp->root);
	else
		ht = u32_lookup_ht(tp_c, TC_U32_HTID(handle));

	if (!ht)
		return NULL;

	if (TC_U32_KEY(handle) == 0)
		return ht;

	return u32_lookup_key(ht, handle);
}

/* Protected by rtnl lock */
static u32 gen_new_htid(struct tc_u_common *tp_c, struct tc_u_hnode *ptr)
{
	int id = idr_alloc_cyclic(&tp_c->handle_idr, ptr, 1, 0x7FF, GFP_KERNEL);

	if (id < 0)
		return 0;
	return (id | 0x800U) << 20;
}
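
/* For reference, the handle layout implied above (see the TC_U32_*
 * macros in include/uapi/linux/pkt_cls.h): bits 20-31 carry the hash
 * table id, bits 12-19 the bucket, and bits 0-11 the node id. An
 * allocator id of 1 therefore becomes handle (1 | 0x800) << 20 =
 * 0x80100000, i.e. hash table 0x801, bucket 0, node 0.
 */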

static struct hlist_head *tc_u_common_hash;

#define U32_HASH_SHIFT 10
#define U32_HASH_SIZE (1 << U32_HASH_SHIFT)

static void *tc_u_common_ptr(const struct tcf_proto *tp)
{
	struct tcf_block *block = tp->chain->block;

	/* The block sharing is currently supported only
	 * for classless qdiscs. In that case we use block
	 * for tc_u_common identification. In case the
	 * block is not shared, block->q is a valid pointer
	 * and we can use that. That works for classful qdiscs.
	 */
	if (tcf_block_shared(block))
		return block;
	else
		return block->q;
}

static struct hlist_head *tc_u_hash(void *key)
{
	return tc_u_common_hash + hash_ptr(key, U32_HASH_SHIFT);
}

static struct tc_u_common *tc_u_common_find(void *key)
{
	struct tc_u_common *tc;

	hlist_for_each_entry(tc, tc_u_hash(key), hnode) {
		if (tc->ptr == key)
			return tc;
	}
	return NULL;
}

static int u32_init(struct tcf_proto *tp)
{
	struct tc_u_hnode *root_ht;
	void *key = tc_u_common_ptr(tp);
	struct tc_u_common *tp_c = tc_u_common_find(key);

	root_ht = kzalloc(struct_size(root_ht, ht, 1), GFP_KERNEL);
	if (root_ht == NULL)
		return -ENOBUFS;

	root_ht->refcnt++;
	root_ht->handle = tp_c ? gen_new_htid(tp_c, root_ht) : 0x80000000;
	root_ht->prio = tp->prio;
	root_ht->is_root = true;
	idr_init(&root_ht->handle_idr);

	if (tp_c == NULL) {
		tp_c = kzalloc(struct_size(tp_c, hlist->ht, 1), GFP_KERNEL);
		if (tp_c == NULL) {
			kfree(root_ht);
			return -ENOBUFS;
		}
		tp_c->ptr = key;
		INIT_HLIST_NODE(&tp_c->hnode);
		idr_init(&tp_c->handle_idr);

		hlist_add_head(&tp_c->hnode, tc_u_hash(key));
	}

	tp_c->refcnt++;
	RCU_INIT_POINTER(root_ht->next, tp_c->hlist);
	rcu_assign_pointer(tp_c->hlist, root_ht);

	root_ht->refcnt++;
	rcu_assign_pointer(tp->root, root_ht);
	tp->data = tp_c;
	return 0;
}

static void __u32_destroy_key(struct tc_u_knode *n)
{
	struct tc_u_hnode *ht = rtnl_dereference(n->ht_down);

	tcf_exts_destroy(&n->exts);
	if (ht && --ht->refcnt == 0)
		kfree(ht);
	kfree(n);
}

static void u32_destroy_key(struct tc_u_knode *n, bool free_pf)
{
	tcf_exts_put_net(&n->exts);
#ifdef CONFIG_CLS_U32_PERF
	if (free_pf)
		free_percpu(n->pf);
#endif
#ifdef CONFIG_CLS_U32_MARK
	if (free_pf)
		free_percpu(n->pcpu_success);
#endif
	__u32_destroy_key(n);
}

/* u32_delete_key_rcu should be called when freeing a copied
 * version of a tc_u_knode obtained from u32_init_knode(). When
 * copies are obtained from u32_init_knode() the statistics are
 * shared between the old and new copies to allow readers to
 * continue to update the statistics during the copy. To support
 * this the u32_delete_key_rcu variant does not free the percpu
 * statistics.
 */
static void u32_delete_key_work(struct work_struct *work)
{
	struct tc_u_knode *key = container_of(to_rcu_work(work),
					      struct tc_u_knode,
					      rwork);
	rtnl_lock();
	u32_destroy_key(key, false);
	rtnl_unlock();
}

/* u32_delete_key_freepf_rcu is the rcu callback variant
 * that frees the entire structure including the statistics
 * percpu variables. Only use this if the key is not a copy
 * returned by u32_init_knode(). See u32_delete_key_rcu()
 * for the variant that should be used with keys returned from
 * u32_init_knode().
 */
static void u32_delete_key_freepf_work(struct work_struct *work)
{
	struct tc_u_knode *key = container_of(to_rcu_work(work),
					      struct tc_u_knode,
					      rwork);
	rtnl_lock();
	u32_destroy_key(key, true);
	rtnl_unlock();
}

static int u32_delete_key(struct tcf_proto *tp, struct tc_u_knode *key)
{
	struct tc_u_common *tp_c = tp->data;
	struct tc_u_knode __rcu **kp;
	struct tc_u_knode *pkp;
	struct tc_u_hnode *ht = rtnl_dereference(key->ht_up);

	if (ht) {
		kp = &ht->ht[TC_U32_HASH(key->handle)];
		for (pkp = rtnl_dereference(*kp); pkp;
		     kp = &pkp->next, pkp = rtnl_dereference(*kp)) {
			if (pkp == key) {
				RCU_INIT_POINTER(*kp, key->next);
				tp_c->knodes--;

				tcf_unbind_filter(tp, &key->res);
				idr_remove(&ht->handle_idr, key->handle);
				tcf_exts_get_net(&key->exts);
				tcf_queue_work(&key->rwork, u32_delete_key_freepf_work);
				return 0;
			}
		}
	}
	WARN_ON(1);
	return 0;
}

static void u32_clear_hw_hnode(struct tcf_proto *tp, struct tc_u_hnode *h,
			       struct netlink_ext_ack *extack)
{
	struct tcf_block *block = tp->chain->block;
	struct tc_cls_u32_offload cls_u32 = {};

	tc_cls_common_offload_init(&cls_u32.common, tp, h->flags, extack);
	cls_u32.command = TC_CLSU32_DELETE_HNODE;
	cls_u32.hnode.divisor = h->divisor;
	cls_u32.hnode.handle = h->handle;
	cls_u32.hnode.prio = h->prio;

	tc_setup_cb_call(block, TC_SETUP_CLSU32, &cls_u32, false, true);
}

static int u32_replace_hw_hnode(struct tcf_proto *tp, struct tc_u_hnode *h,
				u32 flags, struct netlink_ext_ack *extack)
{
	struct tcf_block *block = tp->chain->block;
	struct tc_cls_u32_offload cls_u32 = {};
	bool skip_sw = tc_skip_sw(flags);
	bool offloaded = false;
	int err;

	tc_cls_common_offload_init(&cls_u32.common, tp, flags, extack);
	cls_u32.command = TC_CLSU32_NEW_HNODE;
	cls_u32.hnode.divisor = h->divisor;
	cls_u32.hnode.handle = h->handle;
	cls_u32.hnode.prio = h->prio;

	err = tc_setup_cb_call(block, TC_SETUP_CLSU32, &cls_u32, skip_sw, true);
	if (err < 0) {
		u32_clear_hw_hnode(tp, h, NULL);
		return err;
	} else if (err > 0) {
		offloaded = true;
	}

	if (skip_sw && !offloaded)
		return -EINVAL;

	return 0;
}

static void u32_remove_hw_knode(struct tcf_proto *tp, struct tc_u_knode *n,
				struct netlink_ext_ack *extack)
{
	struct tcf_block *block = tp->chain->block;
	struct tc_cls_u32_offload cls_u32 = {};

	tc_cls_common_offload_init(&cls_u32.common, tp, n->flags, extack);
	cls_u32.command = TC_CLSU32_DELETE_KNODE;
	cls_u32.knode.handle = n->handle;

	tc_setup_cb_destroy(block, tp, TC_SETUP_CLSU32, &cls_u32, false,
			    &n->flags, &n->in_hw_count, true);
}

static int u32_replace_hw_knode(struct tcf_proto *tp, struct tc_u_knode *n,
				u32 flags, struct netlink_ext_ack *extack)
{
	struct tc_u_hnode *ht = rtnl_dereference(n->ht_down);
	struct tcf_block *block = tp->chain->block;
	struct tc_cls_u32_offload cls_u32 = {};
	bool skip_sw = tc_skip_sw(flags);
	int err;

	tc_cls_common_offload_init(&cls_u32.common, tp, flags, extack);
	cls_u32.command = TC_CLSU32_REPLACE_KNODE;
	cls_u32.knode.handle = n->handle;
	cls_u32.knode.fshift = n->fshift;
#ifdef CONFIG_CLS_U32_MARK
	cls_u32.knode.val = n->val;
	cls_u32.knode.mask = n->mask;
#else
	cls_u32.knode.val = 0;
	cls_u32.knode.mask = 0;
#endif
	cls_u32.knode.sel = &n->sel;
	cls_u32.knode.res = &n->res;
	cls_u32.knode.exts = &n->exts;
	if (n->ht_down)
		cls_u32.knode.link_handle = ht->handle;

	err = tc_setup_cb_add(block, tp, TC_SETUP_CLSU32, &cls_u32, skip_sw,
			      &n->flags, &n->in_hw_count, true);
	if (err) {
		u32_remove_hw_knode(tp, n, NULL);
		return err;
	}

	if (skip_sw && !(n->flags & TCA_CLS_FLAGS_IN_HW))
		return -EINVAL;

	return 0;
}

static void u32_clear_hnode(struct tcf_proto *tp, struct tc_u_hnode *ht,
			    struct netlink_ext_ack *extack)
{
	struct tc_u_common *tp_c = tp->data;
	struct tc_u_knode *n;
	unsigned int h;

	for (h = 0; h <= ht->divisor; h++) {
		while ((n = rtnl_dereference(ht->ht[h])) != NULL) {
			RCU_INIT_POINTER(ht->ht[h],
					 rtnl_dereference(n->next));
			tp_c->knodes--;
			tcf_unbind_filter(tp, &n->res);
			u32_remove_hw_knode(tp, n, extack);
			idr_remove(&ht->handle_idr, n->handle);
			if (tcf_exts_get_net(&n->exts))
				tcf_queue_work(&n->rwork, u32_delete_key_freepf_work);
			else
				u32_destroy_key(n, true);
		}
	}
}

static int u32_destroy_hnode(struct tcf_proto *tp, struct tc_u_hnode *ht,
			     struct netlink_ext_ack *extack)
{
	struct tc_u_common *tp_c = tp->data;
	struct tc_u_hnode __rcu **hn;
	struct tc_u_hnode *phn;

	WARN_ON(--ht->refcnt);

	u32_clear_hnode(tp, ht, extack);

	hn = &tp_c->hlist;
	for (phn = rtnl_dereference(*hn);
	     phn;
	     hn = &phn->next, phn = rtnl_dereference(*hn)) {
		if (phn == ht) {
			u32_clear_hw_hnode(tp, ht, extack);
			idr_destroy(&ht->handle_idr);
			idr_remove(&tp_c->handle_idr, ht->handle);
			RCU_INIT_POINTER(*hn, ht->next);
			kfree_rcu(ht, rcu);
			return 0;
		}
	}

	return -ENOENT;
}

static void u32_destroy(struct tcf_proto *tp, bool rtnl_held,
			struct netlink_ext_ack *extack)
{
	struct tc_u_common *tp_c = tp->data;
	struct tc_u_hnode *root_ht = rtnl_dereference(tp->root);

	WARN_ON(root_ht == NULL);

	if (root_ht && --root_ht->refcnt == 1)
		u32_destroy_hnode(tp, root_ht, extack);

	if (--tp_c->refcnt == 0) {
		struct tc_u_hnode *ht;

		hlist_del(&tp_c->hnode);

		while ((ht = rtnl_dereference(tp_c->hlist)) != NULL) {
			u32_clear_hnode(tp, ht, extack);
			RCU_INIT_POINTER(tp_c->hlist, ht->next);

			/* u32_destroy_key() will later free ht for us, if it's
			 * still referenced by some knode
			 */
			if (--ht->refcnt == 0)
				kfree_rcu(ht, rcu);
		}

		idr_destroy(&tp_c->handle_idr);
		kfree(tp_c);
	}

	tp->data = NULL;
}

static int u32_delete(struct tcf_proto *tp, void *arg, bool *last,
		      bool rtnl_held, struct netlink_ext_ack *extack)
{
	struct tc_u_hnode *ht = arg;
	struct tc_u_common *tp_c = tp->data;
	int ret = 0;

	if (TC_U32_KEY(ht->handle)) {
		u32_remove_hw_knode(tp, (struct tc_u_knode *)ht, extack);
		ret = u32_delete_key(tp, (struct tc_u_knode *)ht);
		goto out;
	}

	if (ht->is_root) {
		NL_SET_ERR_MSG_MOD(extack, "Not allowed to delete root node");
		return -EINVAL;
	}

	if (ht->refcnt == 1) {
		u32_destroy_hnode(tp, ht, extack);
	} else {
		NL_SET_ERR_MSG_MOD(extack, "Can not delete in-use filter");
		return -EBUSY;
	}

out:
	*last = tp_c->refcnt == 1 && tp_c->knodes == 0;
	return ret;
}

static u32 gen_new_kid(struct tc_u_hnode *ht, u32 htid)
{
	u32 index = htid | 0x800;
	u32 max = htid | 0xFFF;

	if (idr_alloc_u32(&ht->handle_idr, NULL, &index, max, GFP_KERNEL)) {
		index = htid + 1;
		if (idr_alloc_u32(&ht->handle_idr, NULL, &index, max,
				  GFP_KERNEL))
			index = max;
	}

	return index;
}
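
/* Illustrative numbers for the allocator above: for htid 0x80100000 the
 * preferred node ids are 0x800-0xfff (handles 0x80100800-0x80100fff);
 * only if that range is exhausted does it retry from htid + 1, and as a
 * last resort it simply returns the maximum id.
 */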

static const struct nla_policy u32_policy[TCA_U32_MAX + 1] = {
	[TCA_U32_CLASSID]	= { .type = NLA_U32 },
	[TCA_U32_HASH]		= { .type = NLA_U32 },
	[TCA_U32_LINK]		= { .type = NLA_U32 },
	[TCA_U32_DIVISOR]	= { .type = NLA_U32 },
	[TCA_U32_SEL]		= { .len = sizeof(struct tc_u32_sel) },
	[TCA_U32_INDEV]		= { .type = NLA_STRING, .len = IFNAMSIZ },
	[TCA_U32_MARK]		= { .len = sizeof(struct tc_u32_mark) },
	[TCA_U32_FLAGS]		= { .type = NLA_U32 },
};

static int u32_set_parms(struct net *net, struct tcf_proto *tp,
			 unsigned long base,
			 struct tc_u_knode *n, struct nlattr **tb,
			 struct nlattr *est, bool ovr,
			 struct netlink_ext_ack *extack)
{
	int err;

	err = tcf_exts_validate(net, tp, tb, est, &n->exts, ovr, true, extack);
	if (err < 0)
		return err;

	if (tb[TCA_U32_LINK]) {
		u32 handle = nla_get_u32(tb[TCA_U32_LINK]);
		struct tc_u_hnode *ht_down = NULL, *ht_old;

		if (TC_U32_KEY(handle)) {
			NL_SET_ERR_MSG_MOD(extack, "u32 Link handle must be a hash table");
			return -EINVAL;
		}

		if (handle) {
			ht_down = u32_lookup_ht(tp->data, handle);

			if (!ht_down) {
				NL_SET_ERR_MSG_MOD(extack, "Link hash table not found");
				return -EINVAL;
			}
			if (ht_down->is_root) {
				NL_SET_ERR_MSG_MOD(extack, "Not linking to root node");
				return -EINVAL;
			}
			ht_down->refcnt++;
		}

		ht_old = rtnl_dereference(n->ht_down);
		rcu_assign_pointer(n->ht_down, ht_down);

		if (ht_old)
			ht_old->refcnt--;
	}
	if (tb[TCA_U32_CLASSID]) {
		n->res.classid = nla_get_u32(tb[TCA_U32_CLASSID]);
		tcf_bind_filter(tp, &n->res, base);
	}

	if (tb[TCA_U32_INDEV]) {
		int ret;

		ret = tcf_change_indev(net, tb[TCA_U32_INDEV], extack);
		if (ret < 0)
			return -EINVAL;
		n->ifindex = ret;
	}
	return 0;
}

static void u32_replace_knode(struct tcf_proto *tp, struct tc_u_common *tp_c,
			      struct tc_u_knode *n)
{
	struct tc_u_knode __rcu **ins;
	struct tc_u_knode *pins;
	struct tc_u_hnode *ht;

	if (TC_U32_HTID(n->handle) == TC_U32_ROOT)
		ht = rtnl_dereference(tp->root);
	else
		ht = u32_lookup_ht(tp_c, TC_U32_HTID(n->handle));

	ins = &ht->ht[TC_U32_HASH(n->handle)];

	/* The node must always exist for it to be replaced; if this is not
	 * the case, then something went very wrong elsewhere.
	 */
	for (pins = rtnl_dereference(*ins); ;
	     ins = &pins->next, pins = rtnl_dereference(*ins))
		if (pins->handle == n->handle)
			break;

	idr_replace(&ht->handle_idr, n, n->handle);
	RCU_INIT_POINTER(n->next, pins->next);
	rcu_assign_pointer(*ins, n);
}

static struct tc_u_knode *u32_init_knode(struct net *net, struct tcf_proto *tp,
					 struct tc_u_knode *n)
{
	struct tc_u_hnode *ht = rtnl_dereference(n->ht_down);
	struct tc_u32_sel *s = &n->sel;
	struct tc_u_knode *new;

	new = kzalloc(struct_size(new, sel.keys, s->nkeys), GFP_KERNEL);
	if (!new)
		return NULL;

	RCU_INIT_POINTER(new->next, n->next);
	new->handle = n->handle;
	RCU_INIT_POINTER(new->ht_up, n->ht_up);

	new->ifindex = n->ifindex;
	new->fshift = n->fshift;
	new->res = n->res;
	new->flags = n->flags;
	RCU_INIT_POINTER(new->ht_down, ht);

	/* bump reference count as long as we hold pointer to structure */
	if (ht)
		ht->refcnt++;

#ifdef CONFIG_CLS_U32_PERF
	/* Statistics may be incremented by readers during update
	 * so we must keep them intact. When the node is later destroyed
	 * a special destroy call must be made to not free the pf memory.
	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 826) new->pf = n->pf;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 827) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 828)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 829) #ifdef CONFIG_CLS_U32_MARK
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 830) new->val = n->val;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 831) new->mask = n->mask;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 832) 	/* Similarly, success statistics must be moved as pointers. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 833) new->pcpu_success = n->pcpu_success;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 834) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 835) memcpy(&new->sel, s, struct_size(s, keys, s->nkeys));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 836)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 837) if (tcf_exts_init(&new->exts, net, TCA_U32_ACT, TCA_U32_POLICE)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 838) kfree(new);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 839) return NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 840) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 841)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 842) return new;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 843) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 844)
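/* Create or update a u32 filter. Three cases are handled below:
 * replacing an existing key node (*arg != NULL), creating a new hash
 * table (TCA_U32_DIVISOR present), and linking a fresh key node into an
 * existing table.
 *
 * Illustrative userspace triggers, assuming standard iproute2 syntax
 * (device, handles and classid are examples only):
 *
 *	tc filter add dev eth0 parent 1: protocol ip prio 1 \
 *		handle 1: u32 divisor 256
 *	tc filter add dev eth0 parent 1: protocol ip prio 1 u32 ht 1: \
 *		match ip dst 10.0.0.0/8 flowid 1:1
 *
 * The first command takes the TCA_U32_DIVISOR branch; the second
 * allocates a key node and inserts it, ordered by node id, into the
 * matching bucket.
 */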
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 845) static int u32_change(struct net *net, struct sk_buff *in_skb,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 846) struct tcf_proto *tp, unsigned long base, u32 handle,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 847) struct nlattr **tca, void **arg, bool ovr, bool rtnl_held,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 848) struct netlink_ext_ack *extack)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 849) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 850) struct tc_u_common *tp_c = tp->data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 851) struct tc_u_hnode *ht;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 852) struct tc_u_knode *n;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 853) struct tc_u32_sel *s;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 854) struct nlattr *opt = tca[TCA_OPTIONS];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 855) struct nlattr *tb[TCA_U32_MAX + 1];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 856) u32 htid, flags = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 857) size_t sel_size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 858) int err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 859)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 860) if (!opt) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 861) if (handle) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 862) NL_SET_ERR_MSG_MOD(extack, "Filter handle requires options");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 863) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 864) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 865) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 866) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 867) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 868)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 869) err = nla_parse_nested_deprecated(tb, TCA_U32_MAX, opt, u32_policy,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 870) extack);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 871) if (err < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 872) return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 873)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 874) if (tb[TCA_U32_FLAGS]) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 875) flags = nla_get_u32(tb[TCA_U32_FLAGS]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 876) if (!tc_flags_valid(flags)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 877) NL_SET_ERR_MSG_MOD(extack, "Invalid filter flags");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 878) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 879) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 880) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 881)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 882) n = *arg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 883) if (n) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 884) struct tc_u_knode *new;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 885)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 886) if (TC_U32_KEY(n->handle) == 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 887) NL_SET_ERR_MSG_MOD(extack, "Key node id cannot be zero");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 888) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 889) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 890)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 891) if ((n->flags ^ flags) &
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 892) ~(TCA_CLS_FLAGS_IN_HW | TCA_CLS_FLAGS_NOT_IN_HW)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 893) NL_SET_ERR_MSG_MOD(extack, "Key node flags do not match passed flags");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 894) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 895) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 896)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 897) new = u32_init_knode(net, tp, n);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 898) if (!new)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 899) return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 900)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 901) err = u32_set_parms(net, tp, base, new, tb,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 902) tca[TCA_RATE], ovr, extack);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 903)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 904) if (err) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 905) __u32_destroy_key(new);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 906) return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 907) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 908)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 909) err = u32_replace_hw_knode(tp, new, flags, extack);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 910) if (err) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 911) __u32_destroy_key(new);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 912) return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 913) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 914)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 915) if (!tc_in_hw(new->flags))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 916) new->flags |= TCA_CLS_FLAGS_NOT_IN_HW;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 917)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 918) u32_replace_knode(tp, tp_c, new);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 919) tcf_unbind_filter(tp, &n->res);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 920) tcf_exts_get_net(&n->exts);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 921) tcf_queue_work(&n->rwork, u32_delete_key_work);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 922) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 923) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 924)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 925) if (tb[TCA_U32_DIVISOR]) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 926) unsigned int divisor = nla_get_u32(tb[TCA_U32_DIVISOR]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 927)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 928) if (!is_power_of_2(divisor)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 929) NL_SET_ERR_MSG_MOD(extack, "Divisor is not a power of 2");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 930) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 931) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 932) if (divisor-- > 0x100) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 933) NL_SET_ERR_MSG_MOD(extack, "Exceeded maximum 256 hash buckets");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 934) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 935) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 936) if (TC_U32_KEY(handle)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 937) NL_SET_ERR_MSG_MOD(extack, "Divisor can only be used on a hash table");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 938) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 939) }
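		/* After the post-decrement above, @divisor holds the bucket
		 * count minus one: the table is allocated with divisor + 1
		 * slots and TC_U32_HASH() indexes 0..divisor.
		 */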
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 940) ht = kzalloc(struct_size(ht, ht, divisor + 1), GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 941) if (ht == NULL)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 942) return -ENOBUFS;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 943) if (handle == 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 944) handle = gen_new_htid(tp->data, ht);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 945) if (handle == 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 946) kfree(ht);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 947) return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 948) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 949) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 950) err = idr_alloc_u32(&tp_c->handle_idr, ht, &handle,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 951) handle, GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 952) if (err) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 953) kfree(ht);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 954) return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 955) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 956) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 957) ht->refcnt = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 958) ht->divisor = divisor;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 959) ht->handle = handle;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 960) ht->prio = tp->prio;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 961) idr_init(&ht->handle_idr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 962) ht->flags = flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 963)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 964) err = u32_replace_hw_hnode(tp, ht, flags, extack);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 965) if (err) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 966) idr_remove(&tp_c->handle_idr, handle);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 967) kfree(ht);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 968) return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 969) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 970)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 971) RCU_INIT_POINTER(ht->next, tp_c->hlist);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 972) rcu_assign_pointer(tp_c->hlist, ht);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 973) *arg = ht;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 974)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 975) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 976) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 977)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 978) if (tb[TCA_U32_HASH]) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 979) htid = nla_get_u32(tb[TCA_U32_HASH]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 980) if (TC_U32_HTID(htid) == TC_U32_ROOT) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 981) ht = rtnl_dereference(tp->root);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 982) htid = ht->handle;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 983) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 984) ht = u32_lookup_ht(tp->data, TC_U32_HTID(htid));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 985) if (!ht) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 986) NL_SET_ERR_MSG_MOD(extack, "Specified hash table not found");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 987) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 988) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 989) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 990) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 991) ht = rtnl_dereference(tp->root);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 992) htid = ht->handle;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 993) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 994)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 995) if (ht->divisor < TC_U32_HASH(htid)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 996) NL_SET_ERR_MSG_MOD(extack, "Specified hash table buckets exceed configured value");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 997) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 998) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 999)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1000) if (handle) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1001) if (TC_U32_HTID(handle) && TC_U32_HTID(handle ^ htid)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1002) NL_SET_ERR_MSG_MOD(extack, "Handle specified hash table address mismatch");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1003) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1004) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1005) handle = htid | TC_U32_NODE(handle);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1006) err = idr_alloc_u32(&ht->handle_idr, NULL, &handle, handle,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1007) GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1008) if (err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1009) return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1010) } else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1011) handle = gen_new_kid(ht, htid);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1012)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1013) if (tb[TCA_U32_SEL] == NULL) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1014) NL_SET_ERR_MSG_MOD(extack, "Selector not specified");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1015) err = -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1016) goto erridr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1017) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1018)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1019) s = nla_data(tb[TCA_U32_SEL]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1020) sel_size = struct_size(s, keys, s->nkeys);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1021) if (nla_len(tb[TCA_U32_SEL]) < sel_size) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1022) err = -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1023) goto erridr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1024) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1025)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1026) n = kzalloc(struct_size(n, sel.keys, s->nkeys), GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1027) if (n == NULL) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1028) err = -ENOBUFS;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1029) goto erridr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1030) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1031)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1032) #ifdef CONFIG_CLS_U32_PERF
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1033) n->pf = __alloc_percpu(struct_size(n->pf, kcnts, s->nkeys),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1034) __alignof__(struct tc_u32_pcnt));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1035) if (!n->pf) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1036) err = -ENOBUFS;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1037) goto errfree;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1038) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1039) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1040)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1041) memcpy(&n->sel, s, sel_size);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1042) RCU_INIT_POINTER(n->ht_up, ht);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1043) n->handle = handle;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1044) n->fshift = s->hmask ? ffs(ntohl(s->hmask)) - 1 : 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1045) n->flags = flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1046)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1047) err = tcf_exts_init(&n->exts, net, TCA_U32_ACT, TCA_U32_POLICE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1048) if (err < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1049) goto errout;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1050)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1051) #ifdef CONFIG_CLS_U32_MARK
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1052) n->pcpu_success = alloc_percpu(u32);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1053) if (!n->pcpu_success) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1054) err = -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1055) goto errout;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1056) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1057)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1058) if (tb[TCA_U32_MARK]) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1059) struct tc_u32_mark *mark;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1060)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1061) mark = nla_data(tb[TCA_U32_MARK]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1062) n->val = mark->val;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1063) n->mask = mark->mask;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1064) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1065) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1066)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1067) err = u32_set_parms(net, tp, base, n, tb, tca[TCA_RATE], ovr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1068) extack);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1069) if (err == 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1070) struct tc_u_knode __rcu **ins;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1071) struct tc_u_knode *pins;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1072)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1073) err = u32_replace_hw_knode(tp, n, flags, extack);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1074) if (err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1075) goto errhw;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1076)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1077) if (!tc_in_hw(n->flags))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1078) n->flags |= TCA_CLS_FLAGS_NOT_IN_HW;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1079)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1080) ins = &ht->ht[TC_U32_HASH(handle)];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1081) for (pins = rtnl_dereference(*ins); pins;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1082) ins = &pins->next, pins = rtnl_dereference(*ins))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1083) if (TC_U32_NODE(handle) < TC_U32_NODE(pins->handle))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1084) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1085)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1086) RCU_INIT_POINTER(n->next, pins);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1087) rcu_assign_pointer(*ins, n);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1088) tp_c->knodes++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1089) *arg = n;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1090) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1091) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1092)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1093) errhw:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1094) #ifdef CONFIG_CLS_U32_MARK
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1095) free_percpu(n->pcpu_success);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1096) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1097)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1098) errout:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1099) tcf_exts_destroy(&n->exts);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1100) #ifdef CONFIG_CLS_U32_PERF
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1101) errfree:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1102) free_percpu(n->pf);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1103) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1104) kfree(n);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1105) erridr:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1106) idr_remove(&ht->handle_idr, handle);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1107) return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1108) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1109)
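/* Walk every hash table of this classifier instance and every key node
 * they contain, invoking arg->fn on each while maintaining the
 * arg->skip/arg->count bookkeeping expected by the tcf walker API.
 */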
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1110) static void u32_walk(struct tcf_proto *tp, struct tcf_walker *arg,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1111) bool rtnl_held)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1112) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1113) struct tc_u_common *tp_c = tp->data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1114) struct tc_u_hnode *ht;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1115) struct tc_u_knode *n;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1116) unsigned int h;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1117)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1118) if (arg->stop)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1119) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1120)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1121) for (ht = rtnl_dereference(tp_c->hlist);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1122) ht;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1123) ht = rtnl_dereference(ht->next)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1124) if (ht->prio != tp->prio)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1125) continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1126) if (arg->count >= arg->skip) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1127) if (arg->fn(tp, ht, arg) < 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1128) arg->stop = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1129) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1130) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1131) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1132) arg->count++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1133) for (h = 0; h <= ht->divisor; h++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1134) for (n = rtnl_dereference(ht->ht[h]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1135) n;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1136) n = rtnl_dereference(n->next)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1137) if (arg->count < arg->skip) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1138) arg->count++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1139) continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1140) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1141) if (arg->fn(tp, n, arg) < 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1142) arg->stop = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1143) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1144) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1145) arg->count++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1146) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1147) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1148) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1149) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1150)
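/* Offload (add) or un-offload (!add) a single hash table through the
 * given block callback. A callback failure is fatal only when adding a
 * skip_sw table, because software fallback can serve every other case.
 */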
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1151) static int u32_reoffload_hnode(struct tcf_proto *tp, struct tc_u_hnode *ht,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1152) bool add, flow_setup_cb_t *cb, void *cb_priv,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1153) struct netlink_ext_ack *extack)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1154) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1155) struct tc_cls_u32_offload cls_u32 = {};
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1156) int err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1157)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1158) tc_cls_common_offload_init(&cls_u32.common, tp, ht->flags, extack);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1159) cls_u32.command = add ? TC_CLSU32_NEW_HNODE : TC_CLSU32_DELETE_HNODE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1160) cls_u32.hnode.divisor = ht->divisor;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1161) cls_u32.hnode.handle = ht->handle;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1162) cls_u32.hnode.prio = ht->prio;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1163)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1164) err = cb(TC_SETUP_CLSU32, &cls_u32, cb_priv);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1165) if (err && add && tc_skip_sw(ht->flags))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1166) return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1167)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1168) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1169) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1170)
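/* Offload or un-offload a single key node through the given block
 * callback; tc_setup_cb_reoffload() updates n->flags and
 * n->in_hw_count to reflect the result.
 */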
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1171) static int u32_reoffload_knode(struct tcf_proto *tp, struct tc_u_knode *n,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1172) bool add, flow_setup_cb_t *cb, void *cb_priv,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1173) struct netlink_ext_ack *extack)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1174) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1175) struct tc_u_hnode *ht = rtnl_dereference(n->ht_down);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1176) struct tcf_block *block = tp->chain->block;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1177) struct tc_cls_u32_offload cls_u32 = {};
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1178) int err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1179)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1180) tc_cls_common_offload_init(&cls_u32.common, tp, n->flags, extack);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1181) cls_u32.command = add ?
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1182) TC_CLSU32_REPLACE_KNODE : TC_CLSU32_DELETE_KNODE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1183) cls_u32.knode.handle = n->handle;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1184)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1185) if (add) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1186) cls_u32.knode.fshift = n->fshift;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1187) #ifdef CONFIG_CLS_U32_MARK
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1188) cls_u32.knode.val = n->val;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1189) cls_u32.knode.mask = n->mask;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1190) #else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1191) cls_u32.knode.val = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1192) cls_u32.knode.mask = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1193) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1194) cls_u32.knode.sel = &n->sel;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1195) cls_u32.knode.res = &n->res;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1196) cls_u32.knode.exts = &n->exts;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1197) if (n->ht_down)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1198) cls_u32.knode.link_handle = ht->handle;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1199) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1200)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1201) err = tc_setup_cb_reoffload(block, tp, add, cb, TC_SETUP_CLSU32,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1202) &cls_u32, cb_priv, &n->flags,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1203) &n->in_hw_count);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1204) if (err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1205) return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1206)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1207) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1208) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1209)
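/* Replay the whole filter set to a callback that is being attached to
 * (add) or detached from (!add) the block, preserving the ordering
 * documented in the loop below.
 */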
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1210) static int u32_reoffload(struct tcf_proto *tp, bool add, flow_setup_cb_t *cb,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1211) void *cb_priv, struct netlink_ext_ack *extack)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1212) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1213) struct tc_u_common *tp_c = tp->data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1214) struct tc_u_hnode *ht;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1215) struct tc_u_knode *n;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1216) unsigned int h;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1217) int err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1218)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1219) for (ht = rtnl_dereference(tp_c->hlist);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1220) ht;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1221) ht = rtnl_dereference(ht->next)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1222) if (ht->prio != tp->prio)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1223) continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1224)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1225) 		/* When adding filters to a new dev, try to offload the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1226) 		 * hashtable first. When removing, offload the filters
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1227) 		 * before the hashtable.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1228) 		 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1229) if (add && !tc_skip_hw(ht->flags)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1230) err = u32_reoffload_hnode(tp, ht, add, cb, cb_priv,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1231) extack);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1232) if (err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1233) return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1234) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1235)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1236) for (h = 0; h <= ht->divisor; h++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1237) for (n = rtnl_dereference(ht->ht[h]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1238) n;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1239) n = rtnl_dereference(n->next)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1240) if (tc_skip_hw(n->flags))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1241) continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1242)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1243) err = u32_reoffload_knode(tp, n, add, cb,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1244) cb_priv, extack);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1245) if (err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1246) return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1247) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1248) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1249)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1250) if (!add && !tc_skip_hw(ht->flags))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1251) u32_reoffload_hnode(tp, ht, add, cb, cb_priv, extack);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1252) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1253)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1254) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1255) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1256)
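/* Re-bind (cl != 0) or unbind the result of the key node @fh when the
 * class identified by @classid changes underneath it.
 */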
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1257) static void u32_bind_class(void *fh, u32 classid, unsigned long cl, void *q,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1258) unsigned long base)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1259) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1260) struct tc_u_knode *n = fh;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1261)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1262) if (n && n->res.classid == classid) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1263) if (cl)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1264) __tcf_bind_filter(q, &n->res, base);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1265) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1266) __tcf_unbind_filter(q, &n->res);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1267) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1268) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1269)
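/* Dump one filter object: either a hash table (TC_U32_KEY(handle) == 0)
 * or a key node, in which case the per-cpu hit counters are summed into
 * a heap buffer before being emitted as TCA_U32_PCNT.
 */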
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1270) static int u32_dump(struct net *net, struct tcf_proto *tp, void *fh,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1271) struct sk_buff *skb, struct tcmsg *t, bool rtnl_held)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1272) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1273) struct tc_u_knode *n = fh;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1274) struct tc_u_hnode *ht_up, *ht_down;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1275) struct nlattr *nest;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1276)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1277) if (n == NULL)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1278) return skb->len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1279)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1280) t->tcm_handle = n->handle;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1281)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1282) nest = nla_nest_start_noflag(skb, TCA_OPTIONS);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1283) if (nest == NULL)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1284) goto nla_put_failure;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1285)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1286) if (TC_U32_KEY(n->handle) == 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1287) struct tc_u_hnode *ht = fh;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1288) u32 divisor = ht->divisor + 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1289)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1290) if (nla_put_u32(skb, TCA_U32_DIVISOR, divisor))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1291) goto nla_put_failure;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1292) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1293) #ifdef CONFIG_CLS_U32_PERF
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1294) struct tc_u32_pcnt *gpf;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1295) int cpu;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1296) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1297)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1298) if (nla_put(skb, TCA_U32_SEL, struct_size(&n->sel, keys, n->sel.nkeys),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1299) &n->sel))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1300) goto nla_put_failure;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1301)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1302) ht_up = rtnl_dereference(n->ht_up);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1303) if (ht_up) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1304) u32 htid = n->handle & 0xFFFFF000;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1305) if (nla_put_u32(skb, TCA_U32_HASH, htid))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1306) goto nla_put_failure;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1307) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1308) if (n->res.classid &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1309) nla_put_u32(skb, TCA_U32_CLASSID, n->res.classid))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1310) goto nla_put_failure;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1311)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1312) ht_down = rtnl_dereference(n->ht_down);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1313) if (ht_down &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1314) nla_put_u32(skb, TCA_U32_LINK, ht_down->handle))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1315) goto nla_put_failure;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1316)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1317) if (n->flags && nla_put_u32(skb, TCA_U32_FLAGS, n->flags))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1318) goto nla_put_failure;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1319)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1320) #ifdef CONFIG_CLS_U32_MARK
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1321) 		if (n->val || n->mask) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1322) struct tc_u32_mark mark = {.val = n->val,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1323) .mask = n->mask,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1324) .success = 0};
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1325) int cpum;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1326)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1327) for_each_possible_cpu(cpum) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1328) __u32 cnt = *per_cpu_ptr(n->pcpu_success, cpum);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1329)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1330) mark.success += cnt;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1331) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1332)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1333) if (nla_put(skb, TCA_U32_MARK, sizeof(mark), &mark))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1334) goto nla_put_failure;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1335) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1336) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1337)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1338) if (tcf_exts_dump(skb, &n->exts) < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1339) goto nla_put_failure;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1340)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1341) if (n->ifindex) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1342) struct net_device *dev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1343) dev = __dev_get_by_index(net, n->ifindex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1344) if (dev && nla_put_string(skb, TCA_U32_INDEV, dev->name))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1345) goto nla_put_failure;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1346) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1347) #ifdef CONFIG_CLS_U32_PERF
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1348) gpf = kzalloc(struct_size(gpf, kcnts, n->sel.nkeys), GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1349) if (!gpf)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1350) goto nla_put_failure;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1351)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1352) for_each_possible_cpu(cpu) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1353) int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1354) struct tc_u32_pcnt *pf = per_cpu_ptr(n->pf, cpu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1355)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1356) gpf->rcnt += pf->rcnt;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1357) gpf->rhit += pf->rhit;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1358) for (i = 0; i < n->sel.nkeys; i++)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1359) gpf->kcnts[i] += pf->kcnts[i];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1360) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1361)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1362) if (nla_put_64bit(skb, TCA_U32_PCNT, struct_size(gpf, kcnts, n->sel.nkeys),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1363) gpf, TCA_U32_PAD)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1364) kfree(gpf);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1365) goto nla_put_failure;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1366) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1367) kfree(gpf);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1368) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1369) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1370)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1371) nla_nest_end(skb, nest);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1372)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1373) if (TC_U32_KEY(n->handle))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1374) if (tcf_exts_dump_stats(skb, &n->exts) < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1375) goto nla_put_failure;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1376) return skb->len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1377)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1378) nla_put_failure:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1379) nla_nest_cancel(skb, nest);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1380) return -1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1381) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1382)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1383) static struct tcf_proto_ops cls_u32_ops __read_mostly = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1384) .kind = "u32",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1385) .classify = u32_classify,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1386) .init = u32_init,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1387) .destroy = u32_destroy,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1388) .get = u32_get,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1389) .change = u32_change,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1390) .delete = u32_delete,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1391) .walk = u32_walk,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1392) .reoffload = u32_reoffload,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1393) .dump = u32_dump,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1394) .bind_class = u32_bind_class,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1395) .owner = THIS_MODULE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1396) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1397)
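/* Module init: allocate the global hash that lets u32 instances on the
 * same qdisc/block share one tc_u_common, then register the classifier.
 */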
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1398) static int __init init_u32(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1399) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1400) int i, ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1401)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1402) pr_info("u32 classifier\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1403) #ifdef CONFIG_CLS_U32_PERF
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1404) pr_info(" Performance counters on\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1405) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1406) pr_info(" input device check on\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1407) #ifdef CONFIG_NET_CLS_ACT
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1408) pr_info(" Actions configured\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1409) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1410) tc_u_common_hash = kvmalloc_array(U32_HASH_SIZE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1411) sizeof(struct hlist_head),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1412) GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1413) if (!tc_u_common_hash)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1414) return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1415)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1416) for (i = 0; i < U32_HASH_SIZE; i++)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1417) INIT_HLIST_HEAD(&tc_u_common_hash[i]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1418)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1419) ret = register_tcf_proto_ops(&cls_u32_ops);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1420) if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1421) kvfree(tc_u_common_hash);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1422) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1423) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1424)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1425) static void __exit exit_u32(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1426) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1427) unregister_tcf_proto_ops(&cls_u32_ops);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1428) kvfree(tc_u_common_hash);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1429) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1430)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1431) module_init(init_u32)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1432) module_exit(exit_u32)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1433) MODULE_LICENSE("GPL");