Orange Pi 5 kernel

Deprecated Linux kernel 5.10.110 for Orange Pi 5/5B/5 Plus boards

// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * net/sched/cls_route.c	ROUTE4 classifier.
 *
 * Authors:	Alexey Kuznetsov, <kuznet@ms2.inr.ac.ru>
 */

#include <linux/module.h>
#include <linux/slab.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/errno.h>
#include <linux/skbuff.h>
#include <net/dst.h>
#include <net/route.h>
#include <net/netlink.h>
#include <net/act_api.h>
#include <net/pkt_cls.h>

/*
 * 1. For now we assume that route tags < 256.
 *    This allows direct table lookups instead of hash tables.
 * 2. For now we assume that "from TAG" and "fromdev DEV" statements
 *    are mutually exclusive.
 * 3. "to TAG from ANY" has higher priority than "to ANY from XXX".
 */
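/*
 * Small direct-mapped cache of recent classifications, indexed by the
 * low 4 bits of the route tag (see route4_fastmap_hash()); consulted
 * before the per-tag buckets in route4_classify().
 */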
struct route4_fastmap {
	struct route4_filter		*filter;
	u32				id;
	int				iif;
};

struct route4_head {
	struct route4_fastmap		fastmap[16];
	struct route4_bucket __rcu	*table[256 + 1];
	struct rcu_head			rcu;
};

struct route4_bucket {
	/* 16 FROM buckets + 16 IIF buckets + 1 wildcard bucket */
	struct route4_filter __rcu	*ht[16 + 16 + 1];
	struct rcu_head			rcu;
};

struct route4_filter {
	struct route4_filter __rcu	*next;
	u32			id;
	int			iif;

	struct tcf_result	res;
	struct tcf_exts		exts;
	u32			handle;
	struct route4_bucket	*bkt;
	struct tcf_proto	*tp;
	struct rcu_work		rwork;
};

#define ROUTE4_FAILURE ((struct route4_filter *)(-1L))

static inline int route4_fastmap_hash(u32 id, int iif)
{
	return id & 0xF;
}

static DEFINE_SPINLOCK(fastmap_lock);
static void
route4_reset_fastmap(struct route4_head *head)
{
	spin_lock_bh(&fastmap_lock);
	memset(head->fastmap, 0, sizeof(head->fastmap));
	spin_unlock_bh(&fastmap_lock);
}

static void
route4_set_fastmap(struct route4_head *head, u32 id, int iif,
		   struct route4_filter *f)
{
	int h = route4_fastmap_hash(id, iif);

	/* fastmap updates must look atomic to align id, iif, filter */
	spin_lock_bh(&fastmap_lock);
	head->fastmap[h].id = id;
	head->fastmap[h].iif = iif;
	head->fastmap[h].filter = f;
	spin_unlock_bh(&fastmap_lock);
}

static inline int route4_hash_to(u32 id)
{
	return id & 0xFF;
}

static inline int route4_hash_from(u32 id)
{
	return (id >> 16) & 0xF;
}

static inline int route4_hash_iif(int iif)
{
	return 16 + ((iif >> 16) & 0xF);
}

static inline int route4_hash_wild(void)
{
	return 32;
}

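/*
 * Expanded inside the lookup loops of route4_classify() below:
 * "continue" moves on to the next filter in the current chain,
 * "return" leaves route4_classify() itself.
 */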
#define ROUTE4_APPLY_RESULT()					\
{								\
	*res = f->res;						\
	if (tcf_exts_has_actions(&f->exts)) {			\
		int r = tcf_exts_exec(skb, &f->exts, res);	\
		if (r < 0) {					\
			dont_cache = 1;				\
			continue;				\
		}						\
		return r;					\
	} else if (!dont_cache)					\
		route4_set_fastmap(head, id, iif, f);		\
	return 0;						\
}

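/*
 * Lookup order: the fastmap cache first, then the bucket for the exact
 * "to" tag (from-chain, iif-chain, wildcard chain), then the "to ANY"
 * bucket at table[256].  A miss is cached as ROUTE4_FAILURE unless an
 * action result told us not to cache.
 */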
static int route4_classify(struct sk_buff *skb, const struct tcf_proto *tp,
			   struct tcf_result *res)
{
	struct route4_head *head = rcu_dereference_bh(tp->root);
	struct dst_entry *dst;
	struct route4_bucket *b;
	struct route4_filter *f;
	u32 id, h;
	int iif, dont_cache = 0;

	dst = skb_dst(skb);
	if (!dst)
		goto failure;

	id = dst->tclassid;

	iif = inet_iif(skb);

	h = route4_fastmap_hash(id, iif);

	spin_lock(&fastmap_lock);
	if (id == head->fastmap[h].id &&
	    iif == head->fastmap[h].iif &&
	    (f = head->fastmap[h].filter) != NULL) {
		if (f == ROUTE4_FAILURE) {
			spin_unlock(&fastmap_lock);
			goto failure;
		}

		*res = f->res;
		spin_unlock(&fastmap_lock);
		return 0;
	}
	spin_unlock(&fastmap_lock);

	h = route4_hash_to(id);

restart:
	b = rcu_dereference_bh(head->table[h]);
	if (b) {
		for (f = rcu_dereference_bh(b->ht[route4_hash_from(id)]);
		     f;
		     f = rcu_dereference_bh(f->next))
			if (f->id == id)
				ROUTE4_APPLY_RESULT();

		for (f = rcu_dereference_bh(b->ht[route4_hash_iif(iif)]);
		     f;
		     f = rcu_dereference_bh(f->next))
			if (f->iif == iif)
				ROUTE4_APPLY_RESULT();

		for (f = rcu_dereference_bh(b->ht[route4_hash_wild()]);
		     f;
		     f = rcu_dereference_bh(f->next))
			ROUTE4_APPLY_RESULT();
	}
	if (h < 256) {
		h = 256;
		id &= ~0xFFFF;
		goto restart;
	}

	if (!dont_cache)
		route4_set_fastmap(head, id, iif, ROUTE4_FAILURE);
failure:
	return -1;
}

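/*
 * Filter handles pack the match into two halves:
 *   low 16 bits:  the "to" route tag, or 0x8000 when no "to" tag is given
 *   high 16 bits: the "from" route tag, (iif | 0x8000) for "fromif",
 *                 or 0xFFFF when neither is given (wildcard)
 * to_hash() and from_hash() map a handle back to the table and bucket
 * indices used at classification time.
 */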
static inline u32 to_hash(u32 id)
{
	u32 h = id & 0xFF;

	if (id & 0x8000)
		h += 256;
	return h;
}

static inline u32 from_hash(u32 id)
{
	id &= 0xFFFF;
	if (id == 0xFFFF)
		return 32;
	if (!(id & 0x8000)) {
		if (id > 255)
			return 256;
		return id & 0xF;
	}
	return 16 + (id & 0xF);
}

static void *route4_get(struct tcf_proto *tp, u32 handle)
{
	struct route4_head *head = rtnl_dereference(tp->root);
	struct route4_bucket *b;
	struct route4_filter *f;
	unsigned int h1, h2;

	h1 = to_hash(handle);
	if (h1 > 256)
		return NULL;

	h2 = from_hash(handle >> 16);
	if (h2 > 32)
		return NULL;

	b = rtnl_dereference(head->table[h1]);
	if (b) {
		for (f = rtnl_dereference(b->ht[h2]);
		     f;
		     f = rtnl_dereference(f->next))
			if (f->handle == handle)
				return f;
	}
	return NULL;
}

static int route4_init(struct tcf_proto *tp)
{
	struct route4_head *head;

	head = kzalloc(sizeof(struct route4_head), GFP_KERNEL);
	if (head == NULL)
		return -ENOBUFS;

	rcu_assign_pointer(tp->root, head);
	return 0;
}

static void __route4_delete_filter(struct route4_filter *f)
{
	tcf_exts_destroy(&f->exts);
	tcf_exts_put_net(&f->exts);
	kfree(f);
}

static void route4_delete_filter_work(struct work_struct *work)
{
	struct route4_filter *f = container_of(to_rcu_work(work),
					       struct route4_filter,
					       rwork);
	rtnl_lock();
	__route4_delete_filter(f);
	rtnl_unlock();
}

static void route4_queue_work(struct route4_filter *f)
{
	tcf_queue_work(&f->rwork, route4_delete_filter_work);
}

static void route4_destroy(struct tcf_proto *tp, bool rtnl_held,
			   struct netlink_ext_ack *extack)
{
	struct route4_head *head = rtnl_dereference(tp->root);
	int h1, h2;

	if (head == NULL)
		return;

	for (h1 = 0; h1 <= 256; h1++) {
		struct route4_bucket *b;

		b = rtnl_dereference(head->table[h1]);
		if (b) {
			for (h2 = 0; h2 <= 32; h2++) {
				struct route4_filter *f;

				while ((f = rtnl_dereference(b->ht[h2])) != NULL) {
					struct route4_filter *next;

					next = rtnl_dereference(f->next);
					RCU_INIT_POINTER(b->ht[h2], next);
					tcf_unbind_filter(tp, &f->res);
					if (tcf_exts_get_net(&f->exts))
						route4_queue_work(f);
					else
						__route4_delete_filter(f);
				}
			}
			RCU_INIT_POINTER(head->table[h1], NULL);
			kfree_rcu(b, rcu);
		}
	}
	kfree_rcu(head, rcu);
}

static int route4_delete(struct tcf_proto *tp, void *arg, bool *last,
			 bool rtnl_held, struct netlink_ext_ack *extack)
{
	struct route4_head *head = rtnl_dereference(tp->root);
	struct route4_filter *f = arg;
	struct route4_filter __rcu **fp;
	struct route4_filter *nf;
	struct route4_bucket *b;
	unsigned int h = 0;
	int i, h1;

	if (!head || !f)
		return -EINVAL;

	h = f->handle;
	b = f->bkt;

	fp = &b->ht[from_hash(h >> 16)];
	for (nf = rtnl_dereference(*fp); nf;
	     fp = &nf->next, nf = rtnl_dereference(*fp)) {
		if (nf == f) {
			/* unlink it */
			RCU_INIT_POINTER(*fp, rtnl_dereference(f->next));

			/* Remove any fastmap lookups that might reference the
			 * filter; note we already unlinked it, so it cannot
			 * get back into the fastmap.
			 */
			route4_reset_fastmap(head);

			/* Delete it */
			tcf_unbind_filter(tp, &f->res);
			tcf_exts_get_net(&f->exts);
			tcf_queue_work(&f->rwork, route4_delete_filter_work);

			/* Strip RTNL protected tree */
			for (i = 0; i <= 32; i++) {
				struct route4_filter *rt;

				rt = rtnl_dereference(b->ht[i]);
				if (rt)
					goto out;
			}

			/* OK, session has no flows */
			RCU_INIT_POINTER(head->table[to_hash(h)], NULL);
			kfree_rcu(b, rcu);
			break;
		}
	}

out:
	*last = true;
	for (h1 = 0; h1 <= 256; h1++) {
		if (rcu_access_pointer(head->table[h1])) {
			*last = false;
			break;
		}
	}

	return 0;
}

static const struct nla_policy route4_policy[TCA_ROUTE4_MAX + 1] = {
	[TCA_ROUTE4_CLASSID]	= { .type = NLA_U32 },
	[TCA_ROUTE4_TO]		= { .type = NLA_U32 },
	[TCA_ROUTE4_FROM]	= { .type = NLA_U32 },
	[TCA_ROUTE4_IIF]	= { .type = NLA_U32 },
};

static int route4_set_parms(struct net *net, struct tcf_proto *tp,
			    unsigned long base, struct route4_filter *f,
			    u32 handle, struct route4_head *head,
			    struct nlattr **tb, struct nlattr *est, int new,
			    bool ovr, struct netlink_ext_ack *extack)
{
	u32 id = 0, to = 0, nhandle = 0x8000;
	struct route4_filter *fp;
	unsigned int h1;
	struct route4_bucket *b;
	int err;

	err = tcf_exts_validate(net, tp, tb, est, &f->exts, ovr, true, extack);
	if (err < 0)
		return err;

	if (tb[TCA_ROUTE4_TO]) {
		if (new && handle & 0x8000)
			return -EINVAL;
		to = nla_get_u32(tb[TCA_ROUTE4_TO]);
		if (to > 0xFF)
			return -EINVAL;
		nhandle = to;
	}

	if (tb[TCA_ROUTE4_FROM]) {
		if (tb[TCA_ROUTE4_IIF])
			return -EINVAL;
		id = nla_get_u32(tb[TCA_ROUTE4_FROM]);
		if (id > 0xFF)
			return -EINVAL;
		nhandle |= id << 16;
	} else if (tb[TCA_ROUTE4_IIF]) {
		id = nla_get_u32(tb[TCA_ROUTE4_IIF]);
		if (id > 0x7FFF)
			return -EINVAL;
		nhandle |= (id | 0x8000) << 16;
	} else
		nhandle |= 0xFFFF << 16;

	if (handle && new) {
		nhandle |= handle & 0x7F00;
		if (nhandle != handle)
			return -EINVAL;
	}

	h1 = to_hash(nhandle);
	b = rtnl_dereference(head->table[h1]);
	if (!b) {
		b = kzalloc(sizeof(struct route4_bucket), GFP_KERNEL);
		if (b == NULL)
			return -ENOBUFS;

		rcu_assign_pointer(head->table[h1], b);
	} else {
		unsigned int h2 = from_hash(nhandle >> 16);

		for (fp = rtnl_dereference(b->ht[h2]);
		     fp;
		     fp = rtnl_dereference(fp->next))
			if (fp->handle == f->handle)
				return -EEXIST;
	}

	if (tb[TCA_ROUTE4_TO])
		f->id = to;

	if (tb[TCA_ROUTE4_FROM])
		f->id = to | id<<16;
	else if (tb[TCA_ROUTE4_IIF])
		f->iif = id;

	f->handle = nhandle;
	f->bkt = b;
	f->tp = tp;

	if (tb[TCA_ROUTE4_CLASSID]) {
		f->res.classid = nla_get_u32(tb[TCA_ROUTE4_CLASSID]);
		tcf_bind_filter(tp, &f->res, base);
	}

	return 0;
}

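/*
 * route4_change() always allocates a fresh filter.  When an existing
 * filter is being replaced, the new one is linked in first and the old
 * one is then unlinked and freed via RCU-deferred work.
 */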
static int route4_change(struct net *net, struct sk_buff *in_skb,
			 struct tcf_proto *tp, unsigned long base, u32 handle,
			 struct nlattr **tca, void **arg, bool ovr,
			 bool rtnl_held, struct netlink_ext_ack *extack)
{
	struct route4_head *head = rtnl_dereference(tp->root);
	struct route4_filter __rcu **fp;
	struct route4_filter *fold, *f1, *pfp, *f = NULL;
	struct route4_bucket *b;
	struct nlattr *opt = tca[TCA_OPTIONS];
	struct nlattr *tb[TCA_ROUTE4_MAX + 1];
	unsigned int h, th;
	int err;
	bool new = true;

	if (opt == NULL)
		return handle ? -EINVAL : 0;

	err = nla_parse_nested_deprecated(tb, TCA_ROUTE4_MAX, opt,
					  route4_policy, NULL);
	if (err < 0)
		return err;

	fold = *arg;
	if (fold && handle && fold->handle != handle)
		return -EINVAL;

	err = -ENOBUFS;
	f = kzalloc(sizeof(struct route4_filter), GFP_KERNEL);
	if (!f)
		goto errout;

	err = tcf_exts_init(&f->exts, net, TCA_ROUTE4_ACT, TCA_ROUTE4_POLICE);
	if (err < 0)
		goto errout;

	if (fold) {
		f->id = fold->id;
		f->iif = fold->iif;
		f->res = fold->res;
		f->handle = fold->handle;

		f->tp = fold->tp;
		f->bkt = fold->bkt;
		new = false;
	}

	err = route4_set_parms(net, tp, base, f, handle, head, tb,
			       tca[TCA_RATE], new, ovr, extack);
	if (err < 0)
		goto errout;

	h = from_hash(f->handle >> 16);
	fp = &f->bkt->ht[h];
	for (pfp = rtnl_dereference(*fp);
	     (f1 = rtnl_dereference(*fp)) != NULL;
	     fp = &f1->next)
		if (f->handle < f1->handle)
			break;

	tcf_block_netif_keep_dst(tp->chain->block);
	rcu_assign_pointer(f->next, f1);
	rcu_assign_pointer(*fp, f);

	if (fold && fold->handle && f->handle != fold->handle) {
		th = to_hash(fold->handle);
		h = from_hash(fold->handle >> 16);
		b = rtnl_dereference(head->table[th]);
		if (b) {
			fp = &b->ht[h];
			for (pfp = rtnl_dereference(*fp); pfp;
			     fp = &pfp->next, pfp = rtnl_dereference(*fp)) {
				if (pfp == fold) {
					rcu_assign_pointer(*fp, fold->next);
					break;
				}
			}
		}
	}

	route4_reset_fastmap(head);
	*arg = f;
	if (fold) {
		tcf_unbind_filter(tp, &fold->res);
		tcf_exts_get_net(&fold->exts);
		tcf_queue_work(&fold->rwork, route4_delete_filter_work);
	}
	return 0;

errout:
	if (f)
		tcf_exts_destroy(&f->exts);
	kfree(f);
	return err;
}

static void route4_walk(struct tcf_proto *tp, struct tcf_walker *arg,
			bool rtnl_held)
{
	struct route4_head *head = rtnl_dereference(tp->root);
	unsigned int h, h1;

	if (head == NULL || arg->stop)
		return;

	for (h = 0; h <= 256; h++) {
		struct route4_bucket *b = rtnl_dereference(head->table[h]);

		if (b) {
			for (h1 = 0; h1 <= 32; h1++) {
				struct route4_filter *f;

				for (f = rtnl_dereference(b->ht[h1]);
				     f;
				     f = rtnl_dereference(f->next)) {
					if (arg->count < arg->skip) {
						arg->count++;
						continue;
					}
					if (arg->fn(tp, f, arg) < 0) {
						arg->stop = 1;
						return;
					}
					arg->count++;
				}
			}
		}
	}
}

static int route4_dump(struct net *net, struct tcf_proto *tp, void *fh,
		       struct sk_buff *skb, struct tcmsg *t, bool rtnl_held)
{
	struct route4_filter *f = fh;
	struct nlattr *nest;
	u32 id;

	if (f == NULL)
		return skb->len;

	t->tcm_handle = f->handle;

	nest = nla_nest_start_noflag(skb, TCA_OPTIONS);
	if (nest == NULL)
		goto nla_put_failure;

	if (!(f->handle & 0x8000)) {
		id = f->id & 0xFF;
		if (nla_put_u32(skb, TCA_ROUTE4_TO, id))
			goto nla_put_failure;
	}
	if (f->handle & 0x80000000) {
		if ((f->handle >> 16) != 0xFFFF &&
		    nla_put_u32(skb, TCA_ROUTE4_IIF, f->iif))
			goto nla_put_failure;
	} else {
		id = f->id >> 16;
		if (nla_put_u32(skb, TCA_ROUTE4_FROM, id))
			goto nla_put_failure;
	}
	if (f->res.classid &&
	    nla_put_u32(skb, TCA_ROUTE4_CLASSID, f->res.classid))
		goto nla_put_failure;

	if (tcf_exts_dump(skb, &f->exts) < 0)
		goto nla_put_failure;

	nla_nest_end(skb, nest);

	if (tcf_exts_dump_stats(skb, &f->exts) < 0)
		goto nla_put_failure;

	return skb->len;

nla_put_failure:
	nla_nest_cancel(skb, nest);
	return -1;
}

static void route4_bind_class(void *fh, u32 classid, unsigned long cl, void *q,
			      unsigned long base)
{
	struct route4_filter *f = fh;

	if (f && f->res.classid == classid) {
		if (cl)
			__tcf_bind_filter(q, &f->res, base);
		else
			__tcf_unbind_filter(q, &f->res);
	}
}

static struct tcf_proto_ops cls_route4_ops __read_mostly = {
	.kind		=	"route",
	.classify	=	route4_classify,
	.init		=	route4_init,
	.destroy	=	route4_destroy,
	.get		=	route4_get,
	.change		=	route4_change,
	.delete		=	route4_delete,
	.walk		=	route4_walk,
	.dump		=	route4_dump,
	.bind_class	=	route4_bind_class,
	.owner		=	THIS_MODULE,
};

static int __init init_route4(void)
{
	return register_tcf_proto_ops(&cls_route4_ops);
}

static void __exit exit_route4(void)
{
	unregister_tcf_proto_ops(&cls_route4_ops);
}

module_init(init_route4)
module_exit(exit_route4)
MODULE_LICENSE("GPL");