// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * net/sched/cls_flow.c		Generic flow classifier
 *
 * Copyright (c) 2007, 2008 Patrick McHardy <kaber@trash.net>
 */

#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/list.h>
#include <linux/jhash.h>
#include <linux/random.h>
#include <linux/pkt_cls.h>
#include <linux/skbuff.h>
#include <linux/in.h>
#include <linux/ip.h>
#include <linux/ipv6.h>
#include <linux/if_vlan.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <net/inet_sock.h>

#include <net/pkt_cls.h>
#include <net/ip.h>
#include <net/route.h>
#include <net/flow_dissector.h>

#if IS_ENABLED(CONFIG_NF_CONNTRACK)
#include <net/netfilter/nf_conntrack.h>
#endif

struct flow_head {
	struct list_head	filters;
	struct rcu_head		rcu;
};

struct flow_filter {
	struct list_head	list;
	struct tcf_exts		exts;
	struct tcf_ematch_tree	ematches;
	struct tcf_proto	*tp;
	struct timer_list	perturb_timer;
	u32			perturb_period;
	u32			handle;

	u32			nkeys;
	u32			keymask;
	u32			mode;
	u32			mask;
	u32			xor;
	u32			rshift;
	u32			addend;
	u32			divisor;
	u32			baseclass;
	u32			hashrnd;
	struct rcu_work		rwork;
};

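/*
 * Fold a kernel pointer into a 32-bit value, XORing the upper and lower
 * halves on 64-bit hosts. Used as a fallback key when no suitable header
 * field is available (e.g. the socket or dst_entry pointers below).
 */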
static inline u32 addr_fold(void *addr)
{
	unsigned long a = (unsigned long)addr;

	return (a & 0xFFFFFFFF) ^ (BITS_PER_LONG > 32 ? a >> 32 : 0);
}

static u32 flow_get_src(const struct sk_buff *skb, const struct flow_keys *flow)
{
	__be32 src = flow_get_u32_src(flow);

	if (src)
		return ntohl(src);

	return addr_fold(skb->sk);
}

static u32 flow_get_dst(const struct sk_buff *skb, const struct flow_keys *flow)
{
	__be32 dst = flow_get_u32_dst(flow);

	if (dst)
		return ntohl(dst);

	return addr_fold(skb_dst(skb)) ^ (__force u16)skb_protocol(skb, true);
}

static u32 flow_get_proto(const struct sk_buff *skb,
			  const struct flow_keys *flow)
{
	return flow->basic.ip_proto;
}

static u32 flow_get_proto_src(const struct sk_buff *skb,
			      const struct flow_keys *flow)
{
	if (flow->ports.ports)
		return ntohs(flow->ports.src);

	return addr_fold(skb->sk);
}

static u32 flow_get_proto_dst(const struct sk_buff *skb,
			      const struct flow_keys *flow)
{
	if (flow->ports.ports)
		return ntohs(flow->ports.dst);

	return addr_fold(skb_dst(skb)) ^ (__force u16)skb_protocol(skb, true);
}

static u32 flow_get_iif(const struct sk_buff *skb)
{
	return skb->skb_iif;
}

static u32 flow_get_priority(const struct sk_buff *skb)
{
	return skb->priority;
}

static u32 flow_get_mark(const struct sk_buff *skb)
{
	return skb->mark;
}

static u32 flow_get_nfct(const struct sk_buff *skb)
{
#if IS_ENABLED(CONFIG_NF_CONNTRACK)
	return addr_fold(skb_nfct(skb));
#else
	return 0;
#endif
}

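/*
 * CTTUPLE() reads a member of the conntrack tuple for the packet's
 * direction. If the skb carries no conntrack entry (or conntrack is not
 * built in), it jumps to the caller's local "fallback" label so the
 * corresponding non-conntrack key is used instead.
 */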
#if IS_ENABLED(CONFIG_NF_CONNTRACK)
#define CTTUPLE(skb, member)						\
({									\
	enum ip_conntrack_info ctinfo;					\
	const struct nf_conn *ct = nf_ct_get(skb, &ctinfo);		\
	if (ct == NULL)							\
		goto fallback;						\
	ct->tuplehash[CTINFO2DIR(ctinfo)].tuple.member;			\
})
#else
#define CTTUPLE(skb, member)						\
({									\
	goto fallback;							\
	0;								\
})
#endif

static u32 flow_get_nfct_src(const struct sk_buff *skb,
			     const struct flow_keys *flow)
{
	switch (skb_protocol(skb, true)) {
	case htons(ETH_P_IP):
		return ntohl(CTTUPLE(skb, src.u3.ip));
	case htons(ETH_P_IPV6):
		return ntohl(CTTUPLE(skb, src.u3.ip6[3]));
	}
fallback:
	return flow_get_src(skb, flow);
}

static u32 flow_get_nfct_dst(const struct sk_buff *skb,
			     const struct flow_keys *flow)
{
	switch (skb_protocol(skb, true)) {
	case htons(ETH_P_IP):
		return ntohl(CTTUPLE(skb, dst.u3.ip));
	case htons(ETH_P_IPV6):
		return ntohl(CTTUPLE(skb, dst.u3.ip6[3]));
	}
fallback:
	return flow_get_dst(skb, flow);
}

static u32 flow_get_nfct_proto_src(const struct sk_buff *skb,
				   const struct flow_keys *flow)
{
	return ntohs(CTTUPLE(skb, src.u.all));
fallback:
	return flow_get_proto_src(skb, flow);
}

static u32 flow_get_nfct_proto_dst(const struct sk_buff *skb,
				   const struct flow_keys *flow)
{
	return ntohs(CTTUPLE(skb, dst.u.all));
fallback:
	return flow_get_proto_dst(skb, flow);
}

static u32 flow_get_rtclassid(const struct sk_buff *skb)
{
#ifdef CONFIG_IP_ROUTE_CLASSID
	if (skb_dst(skb))
		return skb_dst(skb)->tclassid;
#endif
	return 0;
}

static u32 flow_get_skuid(const struct sk_buff *skb)
{
	struct sock *sk = skb_to_full_sk(skb);

	if (sk && sk->sk_socket && sk->sk_socket->file) {
		kuid_t skuid = sk->sk_socket->file->f_cred->fsuid;

		return from_kuid(&init_user_ns, skuid);
	}
	return 0;
}

static u32 flow_get_skgid(const struct sk_buff *skb)
{
	struct sock *sk = skb_to_full_sk(skb);

	if (sk && sk->sk_socket && sk->sk_socket->file) {
		kgid_t skgid = sk->sk_socket->file->f_cred->fsgid;

		return from_kgid(&init_user_ns, skgid);
	}
	return 0;
}

static u32 flow_get_vlan_tag(const struct sk_buff *skb)
{
	u16 tag;

	if (vlan_get_tag(skb, &tag) < 0)
		return 0;
	return tag & VLAN_VID_MASK;
}

static u32 flow_get_rxhash(struct sk_buff *skb)
{
	return skb_get_hash(skb);
}

static u32 flow_key_get(struct sk_buff *skb, int key, struct flow_keys *flow)
{
	switch (key) {
	case FLOW_KEY_SRC:
		return flow_get_src(skb, flow);
	case FLOW_KEY_DST:
		return flow_get_dst(skb, flow);
	case FLOW_KEY_PROTO:
		return flow_get_proto(skb, flow);
	case FLOW_KEY_PROTO_SRC:
		return flow_get_proto_src(skb, flow);
	case FLOW_KEY_PROTO_DST:
		return flow_get_proto_dst(skb, flow);
	case FLOW_KEY_IIF:
		return flow_get_iif(skb);
	case FLOW_KEY_PRIORITY:
		return flow_get_priority(skb);
	case FLOW_KEY_MARK:
		return flow_get_mark(skb);
	case FLOW_KEY_NFCT:
		return flow_get_nfct(skb);
	case FLOW_KEY_NFCT_SRC:
		return flow_get_nfct_src(skb, flow);
	case FLOW_KEY_NFCT_DST:
		return flow_get_nfct_dst(skb, flow);
	case FLOW_KEY_NFCT_PROTO_SRC:
		return flow_get_nfct_proto_src(skb, flow);
	case FLOW_KEY_NFCT_PROTO_DST:
		return flow_get_nfct_proto_dst(skb, flow);
	case FLOW_KEY_RTCLASSID:
		return flow_get_rtclassid(skb);
	case FLOW_KEY_SKUID:
		return flow_get_skuid(skb);
	case FLOW_KEY_SKGID:
		return flow_get_skgid(skb);
	case FLOW_KEY_VLAN_TAG:
		return flow_get_vlan_tag(skb);
	case FLOW_KEY_RXHASH:
		return flow_get_rxhash(skb);
	default:
		WARN_ON(1);
		return 0;
	}
}

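/* Keys that require dissecting the packet headers into struct flow_keys. */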
#define FLOW_KEYS_NEEDED ((1 << FLOW_KEY_SRC) |		\
			  (1 << FLOW_KEY_DST) |			\
			  (1 << FLOW_KEY_PROTO) |		\
			  (1 << FLOW_KEY_PROTO_SRC) |		\
			  (1 << FLOW_KEY_PROTO_DST) |		\
			  (1 << FLOW_KEY_NFCT_SRC) |		\
			  (1 << FLOW_KEY_NFCT_DST) |		\
			  (1 << FLOW_KEY_NFCT_PROTO_SRC) |	\
			  (1 << FLOW_KEY_NFCT_PROTO_DST))

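/*
 * Classification: for each filter, run its ematches, gather the configured
 * keys (dissecting the flow only when needed), then either hash the keys
 * (FLOW_MODE_HASH) or map the single key through mask/xor/rshift/addend,
 * optionally take the result modulo the divisor, and build the class id
 * on top of the filter's baseclass.
 */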
static int flow_classify(struct sk_buff *skb, const struct tcf_proto *tp,
			 struct tcf_result *res)
{
	struct flow_head *head = rcu_dereference_bh(tp->root);
	struct flow_filter *f;
	u32 keymask;
	u32 classid;
	unsigned int n, key;
	int r;

	list_for_each_entry_rcu(f, &head->filters, list) {
		u32 keys[FLOW_KEY_MAX + 1];
		struct flow_keys flow_keys;

		if (!tcf_em_tree_match(skb, &f->ematches, NULL))
			continue;

		keymask = f->keymask;
		if (keymask & FLOW_KEYS_NEEDED)
			skb_flow_dissect_flow_keys(skb, &flow_keys, 0);

		for (n = 0; n < f->nkeys; n++) {
			key = ffs(keymask) - 1;
			keymask &= ~(1 << key);
			keys[n] = flow_key_get(skb, key, &flow_keys);
		}

		if (f->mode == FLOW_MODE_HASH)
			classid = jhash2(keys, f->nkeys, f->hashrnd);
		else {
			classid = keys[0];
			classid = (classid & f->mask) ^ f->xor;
			classid = (classid >> f->rshift) + f->addend;
		}

		if (f->divisor)
			classid %= f->divisor;

		res->class = 0;
		res->classid = TC_H_MAKE(f->baseclass, f->baseclass + classid);

		r = tcf_exts_exec(skb, &f->exts, res);
		if (r < 0)
			continue;
		return r;
	}
	return -1;
}

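/* Periodically re-seed the jhash key so hash-mode mappings get reshuffled. */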
static void flow_perturbation(struct timer_list *t)
{
	struct flow_filter *f = from_timer(f, t, perturb_timer);

	get_random_bytes(&f->hashrnd, 4);
	if (f->perturb_period)
		mod_timer(&f->perturb_timer, jiffies + f->perturb_period);
}

static const struct nla_policy flow_policy[TCA_FLOW_MAX + 1] = {
	[TCA_FLOW_KEYS]		= { .type = NLA_U32 },
	[TCA_FLOW_MODE]		= { .type = NLA_U32 },
	[TCA_FLOW_BASECLASS]	= { .type = NLA_U32 },
	[TCA_FLOW_RSHIFT]	= { .type = NLA_U32 },
	[TCA_FLOW_ADDEND]	= { .type = NLA_U32 },
	[TCA_FLOW_MASK]		= { .type = NLA_U32 },
	[TCA_FLOW_XOR]		= { .type = NLA_U32 },
	[TCA_FLOW_DIVISOR]	= { .type = NLA_U32 },
	[TCA_FLOW_ACT]		= { .type = NLA_NESTED },
	[TCA_FLOW_POLICE]	= { .type = NLA_NESTED },
	[TCA_FLOW_EMATCHES]	= { .type = NLA_NESTED },
	[TCA_FLOW_PERTURB]	= { .type = NLA_U32 },
};

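/* Final teardown: stop the perturbation timer and release exts/ematches. */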
static void __flow_destroy_filter(struct flow_filter *f)
{
	del_timer_sync(&f->perturb_timer);
	tcf_exts_destroy(&f->exts);
	tcf_em_tree_destroy(&f->ematches);
	tcf_exts_put_net(&f->exts);
	kfree(f);
}

static void flow_destroy_filter_work(struct work_struct *work)
{
	struct flow_filter *f = container_of(to_rcu_work(work),
					     struct flow_filter,
					     rwork);
	rtnl_lock();
	__flow_destroy_filter(f);
	rtnl_unlock();
}

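/*
 * Create or replace a filter from netlink attributes. A new flow_filter is
 * always allocated; when replacing, the old filter's parameters are copied
 * first and then overridden by whatever attributes this request supplies.
 * The old filter, if any, is freed via RCU work after the list swap.
 */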
static int flow_change(struct net *net, struct sk_buff *in_skb,
		       struct tcf_proto *tp, unsigned long base,
		       u32 handle, struct nlattr **tca,
		       void **arg, bool ovr, bool rtnl_held,
		       struct netlink_ext_ack *extack)
{
	struct flow_head *head = rtnl_dereference(tp->root);
	struct flow_filter *fold, *fnew;
	struct nlattr *opt = tca[TCA_OPTIONS];
	struct nlattr *tb[TCA_FLOW_MAX + 1];
	unsigned int nkeys = 0;
	unsigned int perturb_period = 0;
	u32 baseclass = 0;
	u32 keymask = 0;
	u32 mode;
	int err;

	if (opt == NULL)
		return -EINVAL;

	err = nla_parse_nested_deprecated(tb, TCA_FLOW_MAX, opt, flow_policy,
					  NULL);
	if (err < 0)
		return err;

	if (tb[TCA_FLOW_BASECLASS]) {
		baseclass = nla_get_u32(tb[TCA_FLOW_BASECLASS]);
		if (TC_H_MIN(baseclass) == 0)
			return -EINVAL;
	}

	if (tb[TCA_FLOW_KEYS]) {
		keymask = nla_get_u32(tb[TCA_FLOW_KEYS]);

		nkeys = hweight32(keymask);
		if (nkeys == 0)
			return -EINVAL;

		if (fls(keymask) - 1 > FLOW_KEY_MAX)
			return -EOPNOTSUPP;

		if ((keymask & (FLOW_KEY_SKUID|FLOW_KEY_SKGID)) &&
		    sk_user_ns(NETLINK_CB(in_skb).sk) != &init_user_ns)
			return -EOPNOTSUPP;
	}

	fnew = kzalloc(sizeof(*fnew), GFP_KERNEL);
	if (!fnew)
		return -ENOBUFS;

	err = tcf_em_tree_validate(tp, tb[TCA_FLOW_EMATCHES], &fnew->ematches);
	if (err < 0)
		goto err1;

	err = tcf_exts_init(&fnew->exts, net, TCA_FLOW_ACT, TCA_FLOW_POLICE);
	if (err < 0)
		goto err2;

	err = tcf_exts_validate(net, tp, tb, tca[TCA_RATE], &fnew->exts, ovr,
				true, extack);
	if (err < 0)
		goto err2;

	fold = *arg;
	if (fold) {
		err = -EINVAL;
		if (fold->handle != handle && handle)
			goto err2;

		/* Copy fold into fnew */
		fnew->tp = fold->tp;
		fnew->handle = fold->handle;
		fnew->nkeys = fold->nkeys;
		fnew->keymask = fold->keymask;
		fnew->mode = fold->mode;
		fnew->mask = fold->mask;
		fnew->xor = fold->xor;
		fnew->rshift = fold->rshift;
		fnew->addend = fold->addend;
		fnew->divisor = fold->divisor;
		fnew->baseclass = fold->baseclass;
		fnew->hashrnd = fold->hashrnd;

		mode = fold->mode;
		if (tb[TCA_FLOW_MODE])
			mode = nla_get_u32(tb[TCA_FLOW_MODE]);
		if (mode != FLOW_MODE_HASH && nkeys > 1)
			goto err2;

		if (mode == FLOW_MODE_HASH)
			perturb_period = fold->perturb_period;
		if (tb[TCA_FLOW_PERTURB]) {
			if (mode != FLOW_MODE_HASH)
				goto err2;
			perturb_period = nla_get_u32(tb[TCA_FLOW_PERTURB]) * HZ;
		}
	} else {
		err = -EINVAL;
		if (!handle)
			goto err2;
		if (!tb[TCA_FLOW_KEYS])
			goto err2;

		mode = FLOW_MODE_MAP;
		if (tb[TCA_FLOW_MODE])
			mode = nla_get_u32(tb[TCA_FLOW_MODE]);
		if (mode != FLOW_MODE_HASH && nkeys > 1)
			goto err2;

		if (tb[TCA_FLOW_PERTURB]) {
			if (mode != FLOW_MODE_HASH)
				goto err2;
			perturb_period = nla_get_u32(tb[TCA_FLOW_PERTURB]) * HZ;
		}

		if (TC_H_MAJ(baseclass) == 0) {
			struct Qdisc *q = tcf_block_q(tp->chain->block);

			baseclass = TC_H_MAKE(q->handle, baseclass);
		}
		if (TC_H_MIN(baseclass) == 0)
			baseclass = TC_H_MAKE(baseclass, 1);

		fnew->handle = handle;
		fnew->mask = ~0U;
		fnew->tp = tp;
		get_random_bytes(&fnew->hashrnd, 4);
	}

	timer_setup(&fnew->perturb_timer, flow_perturbation, TIMER_DEFERRABLE);

	tcf_block_netif_keep_dst(tp->chain->block);

	if (tb[TCA_FLOW_KEYS]) {
		fnew->keymask = keymask;
		fnew->nkeys = nkeys;
	}

	fnew->mode = mode;

	if (tb[TCA_FLOW_MASK])
		fnew->mask = nla_get_u32(tb[TCA_FLOW_MASK]);
	if (tb[TCA_FLOW_XOR])
		fnew->xor = nla_get_u32(tb[TCA_FLOW_XOR]);
	if (tb[TCA_FLOW_RSHIFT])
		fnew->rshift = nla_get_u32(tb[TCA_FLOW_RSHIFT]);
	if (tb[TCA_FLOW_ADDEND])
		fnew->addend = nla_get_u32(tb[TCA_FLOW_ADDEND]);

	if (tb[TCA_FLOW_DIVISOR])
		fnew->divisor = nla_get_u32(tb[TCA_FLOW_DIVISOR]);
	if (baseclass)
		fnew->baseclass = baseclass;

	fnew->perturb_period = perturb_period;
	if (perturb_period)
		mod_timer(&fnew->perturb_timer, jiffies + perturb_period);

	if (!*arg)
		list_add_tail_rcu(&fnew->list, &head->filters);
	else
		list_replace_rcu(&fold->list, &fnew->list);

	*arg = fnew;

	if (fold) {
		tcf_exts_get_net(&fold->exts);
		tcf_queue_work(&fold->rwork, flow_destroy_filter_work);
	}
	return 0;

err2:
	tcf_exts_destroy(&fnew->exts);
	tcf_em_tree_destroy(&fnew->ematches);
err1:
	kfree(fnew);
	return err;
}

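/* Unlink a single filter and free it once RCU readers are done. */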
static int flow_delete(struct tcf_proto *tp, void *arg, bool *last,
		       bool rtnl_held, struct netlink_ext_ack *extack)
{
	struct flow_head *head = rtnl_dereference(tp->root);
	struct flow_filter *f = arg;

	list_del_rcu(&f->list);
	tcf_exts_get_net(&f->exts);
	tcf_queue_work(&f->rwork, flow_destroy_filter_work);
	*last = list_empty(&head->filters);
	return 0;
}

static int flow_init(struct tcf_proto *tp)
{
	struct flow_head *head;

	head = kzalloc(sizeof(*head), GFP_KERNEL);
	if (head == NULL)
		return -ENOBUFS;
	INIT_LIST_HEAD(&head->filters);
	rcu_assign_pointer(tp->root, head);
	return 0;
}

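/* Tear down the whole classifier instance and every remaining filter. */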
static void flow_destroy(struct tcf_proto *tp, bool rtnl_held,
			 struct netlink_ext_ack *extack)
{
	struct flow_head *head = rtnl_dereference(tp->root);
	struct flow_filter *f, *next;

	list_for_each_entry_safe(f, next, &head->filters, list) {
		list_del_rcu(&f->list);
		if (tcf_exts_get_net(&f->exts))
			tcf_queue_work(&f->rwork, flow_destroy_filter_work);
		else
			__flow_destroy_filter(f);
	}
	kfree_rcu(head, rcu);
}

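/* Look up a filter by handle; returns NULL if none matches. */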
static void *flow_get(struct tcf_proto *tp, u32 handle)
{
	struct flow_head *head = rtnl_dereference(tp->root);
	struct flow_filter *f;

	list_for_each_entry(f, &head->filters, list)
		if (f->handle == handle)
			return f;
	return NULL;
}

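/* Dump a filter's configuration (and extension stats) back over netlink. */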
static int flow_dump(struct net *net, struct tcf_proto *tp, void *fh,
		     struct sk_buff *skb, struct tcmsg *t, bool rtnl_held)
{
	struct flow_filter *f = fh;
	struct nlattr *nest;

	if (f == NULL)
		return skb->len;

	t->tcm_handle = f->handle;

	nest = nla_nest_start_noflag(skb, TCA_OPTIONS);
	if (nest == NULL)
		goto nla_put_failure;

	if (nla_put_u32(skb, TCA_FLOW_KEYS, f->keymask) ||
	    nla_put_u32(skb, TCA_FLOW_MODE, f->mode))
		goto nla_put_failure;

	if (f->mask != ~0 || f->xor != 0) {
		if (nla_put_u32(skb, TCA_FLOW_MASK, f->mask) ||
		    nla_put_u32(skb, TCA_FLOW_XOR, f->xor))
			goto nla_put_failure;
	}
	if (f->rshift &&
	    nla_put_u32(skb, TCA_FLOW_RSHIFT, f->rshift))
		goto nla_put_failure;
	if (f->addend &&
	    nla_put_u32(skb, TCA_FLOW_ADDEND, f->addend))
		goto nla_put_failure;

	if (f->divisor &&
	    nla_put_u32(skb, TCA_FLOW_DIVISOR, f->divisor))
		goto nla_put_failure;
	if (f->baseclass &&
	    nla_put_u32(skb, TCA_FLOW_BASECLASS, f->baseclass))
		goto nla_put_failure;

	if (f->perturb_period &&
	    nla_put_u32(skb, TCA_FLOW_PERTURB, f->perturb_period / HZ))
		goto nla_put_failure;

	if (tcf_exts_dump(skb, &f->exts) < 0)
		goto nla_put_failure;
#ifdef CONFIG_NET_EMATCH
	if (f->ematches.hdr.nmatches &&
	    tcf_em_tree_dump(skb, &f->ematches, TCA_FLOW_EMATCHES) < 0)
		goto nla_put_failure;
#endif
	nla_nest_end(skb, nest);

	if (tcf_exts_dump_stats(skb, &f->exts) < 0)
		goto nla_put_failure;

	return skb->len;

nla_put_failure:
	nla_nest_cancel(skb, nest);
	return -1;
}

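/* Iterate over all filters for dump/walk operations, honouring skip/count. */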
static void flow_walk(struct tcf_proto *tp, struct tcf_walker *arg,
		      bool rtnl_held)
{
	struct flow_head *head = rtnl_dereference(tp->root);
	struct flow_filter *f;

	list_for_each_entry(f, &head->filters, list) {
		if (arg->count < arg->skip)
			goto skip;
		if (arg->fn(tp, f, arg) < 0) {
			arg->stop = 1;
			break;
		}
skip:
		arg->count++;
	}
}

static struct tcf_proto_ops cls_flow_ops __read_mostly = {
	.kind		= "flow",
	.classify	= flow_classify,
	.init		= flow_init,
	.destroy	= flow_destroy,
	.change		= flow_change,
	.delete		= flow_delete,
	.get		= flow_get,
	.dump		= flow_dump,
	.walk		= flow_walk,
	.owner		= THIS_MODULE,
};

static int __init cls_flow_init(void)
{
	return register_tcf_proto_ops(&cls_flow_ops);
}

static void __exit cls_flow_exit(void)
{
	unregister_tcf_proto_ops(&cls_flow_ops);
}

module_init(cls_flow_init);
module_exit(cls_flow_exit);

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Patrick McHardy <kaber@trash.net>");
MODULE_DESCRIPTION("TC flow classifier");