Orange Pi5 kernel

Deprecated Linux kernel 5.10.110 for OrangePi 5/5B/5+ boards

3 Commits   0 Branches   0 Tags
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   1) #ifndef _NET_FLOW_OFFLOAD_H
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   2) #define _NET_FLOW_OFFLOAD_H
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   3) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   4) #include <linux/kernel.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   5) #include <linux/list.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   6) #include <linux/netlink.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   7) #include <net/flow_dissector.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   8) 
/* Raw match description: @dissector describes which keys are present and
 * where they live inside the @key and @mask blobs; @key holds the values
 * to match and @mask the bits of @key that are relevant.
 */
struct flow_match {
	struct flow_dissector	*dissector;
	void			*mask;
	void			*key;
};
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  14) 
/* Typed key/mask pair views, one per dissector key type, filled in by the
 * corresponding flow_rule_match_*() helpers declared below.  In each pair,
 * @key is the value to match and @mask selects the relevant bits.
 */
struct flow_match_meta {
	struct flow_dissector_key_meta *key, *mask;
};

struct flow_match_basic {
	struct flow_dissector_key_basic *key, *mask;
};

struct flow_match_control {
	struct flow_dissector_key_control *key, *mask;
};

struct flow_match_eth_addrs {
	struct flow_dissector_key_eth_addrs *key, *mask;
};

struct flow_match_vlan {
	struct flow_dissector_key_vlan *key, *mask;
};

struct flow_match_ipv4_addrs {
	struct flow_dissector_key_ipv4_addrs *key, *mask;
};

struct flow_match_ipv6_addrs {
	struct flow_dissector_key_ipv6_addrs *key, *mask;
};

struct flow_match_ip {
	struct flow_dissector_key_ip *key, *mask;
};

struct flow_match_ports {
	struct flow_dissector_key_ports *key, *mask;
};

struct flow_match_icmp {
	struct flow_dissector_key_icmp *key, *mask;
};

struct flow_match_tcp {
	struct flow_dissector_key_tcp *key, *mask;
};

struct flow_match_mpls {
	struct flow_dissector_key_mpls *key, *mask;
};

struct flow_match_enc_keyid {
	struct flow_dissector_key_keyid *key, *mask;
};

struct flow_match_enc_opts {
	struct flow_dissector_key_enc_opts *key, *mask;
};

struct flow_match_ct {
	struct flow_dissector_key_ct *key, *mask;
};
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  74) 
struct flow_rule;

/* Fill @out with a typed key/mask view for one dissector key of @rule.
 * Use flow_rule_match_key() (declared below) to test whether a key is
 * present before extracting it.  Implemented in net/core/flow_offload.c.
 */
void flow_rule_match_meta(const struct flow_rule *rule,
			  struct flow_match_meta *out);
void flow_rule_match_basic(const struct flow_rule *rule,
			   struct flow_match_basic *out);
void flow_rule_match_control(const struct flow_rule *rule,
			     struct flow_match_control *out);
void flow_rule_match_eth_addrs(const struct flow_rule *rule,
			       struct flow_match_eth_addrs *out);
void flow_rule_match_vlan(const struct flow_rule *rule,
			  struct flow_match_vlan *out);
void flow_rule_match_cvlan(const struct flow_rule *rule,
			   struct flow_match_vlan *out);
void flow_rule_match_ipv4_addrs(const struct flow_rule *rule,
				struct flow_match_ipv4_addrs *out);
void flow_rule_match_ipv6_addrs(const struct flow_rule *rule,
				struct flow_match_ipv6_addrs *out);
void flow_rule_match_ip(const struct flow_rule *rule,
			struct flow_match_ip *out);
void flow_rule_match_ports(const struct flow_rule *rule,
			   struct flow_match_ports *out);
void flow_rule_match_tcp(const struct flow_rule *rule,
			 struct flow_match_tcp *out);
void flow_rule_match_icmp(const struct flow_rule *rule,
			  struct flow_match_icmp *out);
void flow_rule_match_mpls(const struct flow_rule *rule,
			  struct flow_match_mpls *out);
/* The enc_* variants extract tunnel-encapsulation keys. */
void flow_rule_match_enc_control(const struct flow_rule *rule,
				 struct flow_match_control *out);
void flow_rule_match_enc_ipv4_addrs(const struct flow_rule *rule,
				    struct flow_match_ipv4_addrs *out);
void flow_rule_match_enc_ipv6_addrs(const struct flow_rule *rule,
				    struct flow_match_ipv6_addrs *out);
void flow_rule_match_enc_ip(const struct flow_rule *rule,
			    struct flow_match_ip *out);
void flow_rule_match_enc_ports(const struct flow_rule *rule,
			       struct flow_match_ports *out);
void flow_rule_match_enc_keyid(const struct flow_rule *rule,
			       struct flow_match_enc_keyid *out);
void flow_rule_match_enc_opts(const struct flow_rule *rule,
			      struct flow_match_enc_opts *out);
void flow_rule_match_ct(const struct flow_rule *rule,
			struct flow_match_ct *out);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 119) 
/* Identifies the kind of offloaded action carried by a flow_action_entry.
 * The per-action parameters live in the union inside struct
 * flow_action_entry; the comments there name which member belongs to
 * which id.  NUM_FLOW_ACTIONS is a count, not a real action.
 */
enum flow_action_id {
	FLOW_ACTION_ACCEPT		= 0,
	FLOW_ACTION_DROP,
	FLOW_ACTION_TRAP,
	FLOW_ACTION_GOTO,
	FLOW_ACTION_REDIRECT,
	FLOW_ACTION_MIRRED,
	FLOW_ACTION_REDIRECT_INGRESS,
	FLOW_ACTION_MIRRED_INGRESS,
	FLOW_ACTION_VLAN_PUSH,
	FLOW_ACTION_VLAN_POP,
	FLOW_ACTION_VLAN_MANGLE,
	FLOW_ACTION_TUNNEL_ENCAP,
	FLOW_ACTION_TUNNEL_DECAP,
	FLOW_ACTION_MANGLE,
	FLOW_ACTION_ADD,
	FLOW_ACTION_CSUM,
	FLOW_ACTION_MARK,
	FLOW_ACTION_PTYPE,
	FLOW_ACTION_PRIORITY,
	FLOW_ACTION_WAKE,
	FLOW_ACTION_QUEUE,
	FLOW_ACTION_SAMPLE,
	FLOW_ACTION_POLICE,
	FLOW_ACTION_CT,
	FLOW_ACTION_CT_METADATA,
	FLOW_ACTION_MPLS_PUSH,
	FLOW_ACTION_MPLS_POP,
	FLOW_ACTION_MPLS_MANGLE,
	FLOW_ACTION_GATE,
	NUM_FLOW_ACTIONS,
};
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 152) 
/* This is mirroring enum pedit_header_type definition for easy mapping between
 * tc pedit action. Legacy TCA_PEDIT_KEY_EX_HDR_TYPE_NETWORK is mapped to
 * FLOW_ACT_MANGLE_UNSPEC, which is supported by no driver.
 */
enum flow_action_mangle_base {
	FLOW_ACT_MANGLE_UNSPEC		= 0,	/* legacy "network" header type */
	FLOW_ACT_MANGLE_HDR_TYPE_ETH,		/* Ethernet header */
	FLOW_ACT_MANGLE_HDR_TYPE_IP4,		/* IPv4 header */
	FLOW_ACT_MANGLE_HDR_TYPE_IP6,		/* IPv6 header */
	FLOW_ACT_MANGLE_HDR_TYPE_TCP,		/* TCP header */
	FLOW_ACT_MANGLE_HDR_TYPE_UDP,		/* UDP header */
};
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 165) 
/* Bit numbers of the supported HW stats types; enum flow_action_hw_stats
 * below carries the corresponding bitmasks.
 */
enum flow_action_hw_stats_bit {
	FLOW_ACTION_HW_STATS_IMMEDIATE_BIT,
	FLOW_ACTION_HW_STATS_DELAYED_BIT,
	FLOW_ACTION_HW_STATS_DISABLED_BIT,

	FLOW_ACTION_HW_STATS_NUM_BITS	/* count of the bits above */
};
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 173) 
/* Bitmask form of the HW stats types.  ANY combines immediate and
 * delayed; DONT_CARE sets every defined bit (including disabled) and so
 * accepts any type.
 */
enum flow_action_hw_stats {
	FLOW_ACTION_HW_STATS_IMMEDIATE =
		BIT(FLOW_ACTION_HW_STATS_IMMEDIATE_BIT),
	FLOW_ACTION_HW_STATS_DELAYED = BIT(FLOW_ACTION_HW_STATS_DELAYED_BIT),
	FLOW_ACTION_HW_STATS_ANY = FLOW_ACTION_HW_STATS_IMMEDIATE |
				   FLOW_ACTION_HW_STATS_DELAYED,
	FLOW_ACTION_HW_STATS_DISABLED =
		BIT(FLOW_ACTION_HW_STATS_DISABLED_BIT),
	FLOW_ACTION_HW_STATS_DONT_CARE = BIT(FLOW_ACTION_HW_STATS_NUM_BITS) - 1,
};
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 184) 
/* Destructor callback used to release an action entry's private data. */
typedef void (*action_destr)(void *priv);

/* Opaque user-defined cookie attached to an action: @cookie_len bytes
 * stored inline in the @cookie flexible array.
 */
struct flow_action_cookie {
	u32 cookie_len;
	u8 cookie[];
};

/* Allocate a cookie for @len bytes taken from @data using @gfp
 * (presumably copied inline — see net/core/flow_offload.c); release
 * with flow_action_cookie_destroy().
 */
struct flow_action_cookie *flow_action_cookie_create(void *data,
						     unsigned int len,
						     gfp_t gfp);
void flow_action_cookie_destroy(struct flow_action_cookie *cookie);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 196) 
/* One offloaded action.  @id selects which member of the anonymous union
 * is valid (the trailing comments name the owning FLOW_ACTION_* id);
 * @hw_stats is the HW stats type requested for this action; @destructor
 * (with @destructor_priv) releases resources held by the entry.
 */
struct flow_action_entry {
	enum flow_action_id		id;
	enum flow_action_hw_stats	hw_stats;
	action_destr			destructor;
	void				*destructor_priv;
	union {
		u32			chain_index;	/* FLOW_ACTION_GOTO */
		struct net_device	*dev;		/* FLOW_ACTION_REDIRECT */
		struct {				/* FLOW_ACTION_VLAN */
			u16		vid;
			__be16		proto;
			u8		prio;
		} vlan;
		struct {				/* FLOW_ACTION_MANGLE */
							/* FLOW_ACTION_ADD */
			enum flow_action_mangle_base htype;
			u32		offset;
			u32		mask;
			u32		val;
		} mangle;
		struct ip_tunnel_info	*tunnel;	/* FLOW_ACTION_TUNNEL_ENCAP */
		u32			csum_flags;	/* FLOW_ACTION_CSUM */
		u32			mark;		/* FLOW_ACTION_MARK */
		u16                     ptype;          /* FLOW_ACTION_PTYPE */
		u32			priority;	/* FLOW_ACTION_PRIORITY */
		struct {				/* FLOW_ACTION_QUEUE */
			u32		ctx;
			u32		index;
			u8		vf;
		} queue;
		struct {				/* FLOW_ACTION_SAMPLE */
			struct psample_group	*psample_group;
			u32			rate;
			u32			trunc_size;
			bool			truncate;
		} sample;
		struct {				/* FLOW_ACTION_POLICE */
			u32			index;
			u32			burst;
			u64			rate_bytes_ps;
			u32			mtu;
		} police;
		struct {				/* FLOW_ACTION_CT */
			int action;
			u16 zone;
			struct nf_flowtable *flow_table;
		} ct;
		struct {				/* FLOW_ACTION_CT_METADATA */
			unsigned long cookie;
			u32 mark;
			u32 labels[4];
		} ct_metadata;
		struct {				/* FLOW_ACTION_MPLS_PUSH */
			u32		label;
			__be16		proto;
			u8		tc;
			u8		bos;
			u8		ttl;
		} mpls_push;
		struct {				/* FLOW_ACTION_MPLS_POP */
			__be16		proto;
		} mpls_pop;
		struct {				/* FLOW_ACTION_MPLS_MANGLE */
			u32		label;
			u8		tc;
			u8		bos;
			u8		ttl;
		} mpls_mangle;
		struct {				/* FLOW_ACTION_GATE */
			u32		index;
			s32		prio;
			u64		basetime;
			u64		cycletime;
			u64		cycletimeext;
			u32		num_entries;
			struct action_gate_entry *entries;
		} gate;
	};
	struct flow_action_cookie *cookie; /* user defined action cookie */
};
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 277) 
/* Variable-length list of actions; @num_entries is the number of
 * elements in the @entries flexible array.
 */
struct flow_action {
	unsigned int			num_entries;
	struct flow_action_entry	entries[];
};
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 282) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 283) static inline bool flow_action_has_entries(const struct flow_action *action)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 284) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 285) 	return action->num_entries;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 286) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 287) 
/**
 * flow_offload_has_one_action() - check if exactly one action is present
 * @action: tc filter flow offload action
 *
 * Returns true if exactly one action is present.
 */
static inline bool flow_offload_has_one_action(const struct flow_action *action)
{
	return action->num_entries == 1;
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 298) 
/* Iterate over all entries of @__actions: @__i is the running index and
 * @__act points at the current struct flow_action_entry.  Note the
 * arguments are evaluated more than once, as usual for list-walk macros.
 */
#define flow_action_for_each(__i, __act, __actions)			\
        for (__i = 0, __act = &(__actions)->entries[0];			\
	     __i < (__actions)->num_entries;				\
	     __act = &(__actions)->entries[++__i])
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 303) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 304) static inline bool
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 305) flow_action_mixed_hw_stats_check(const struct flow_action *action,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 306) 				 struct netlink_ext_ack *extack)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 307) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 308) 	const struct flow_action_entry *action_entry;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 309) 	u8 last_hw_stats;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 310) 	int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 311) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 312) 	if (flow_offload_has_one_action(action))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 313) 		return true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 314) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 315) 	flow_action_for_each(i, action_entry, action) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 316) 		if (i && action_entry->hw_stats != last_hw_stats) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 317) 			NL_SET_ERR_MSG_MOD(extack, "Mixing HW stats types for actions is not supported");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 318) 			return false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 319) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 320) 		last_hw_stats = action_entry->hw_stats;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 321) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 322) 	return true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 323) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 324) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 325) static inline const struct flow_action_entry *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 326) flow_action_first_entry_get(const struct flow_action *action)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 327) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 328) 	WARN_ON(!flow_action_has_entries(action));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 329) 	return &action->entries[0];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 330) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 331) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 332) static inline bool
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 333) __flow_action_hw_stats_check(const struct flow_action *action,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 334) 			     struct netlink_ext_ack *extack,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 335) 			     bool check_allow_bit,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 336) 			     enum flow_action_hw_stats_bit allow_bit)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 337) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 338) 	const struct flow_action_entry *action_entry;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 339) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 340) 	if (!flow_action_has_entries(action))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 341) 		return true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 342) 	if (!flow_action_mixed_hw_stats_check(action, extack))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 343) 		return false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 344) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 345) 	action_entry = flow_action_first_entry_get(action);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 346) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 347) 	/* Zero is not a legal value for hw_stats, catch anyone passing it */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 348) 	WARN_ON_ONCE(!action_entry->hw_stats);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 349) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 350) 	if (!check_allow_bit &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 351) 	    ~action_entry->hw_stats & FLOW_ACTION_HW_STATS_ANY) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 352) 		NL_SET_ERR_MSG_MOD(extack, "Driver supports only default HW stats type \"any\"");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 353) 		return false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 354) 	} else if (check_allow_bit &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 355) 		   !(action_entry->hw_stats & BIT(allow_bit))) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 356) 		NL_SET_ERR_MSG_MOD(extack, "Driver does not support selected HW stats type");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 357) 		return false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 358) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 359) 	return true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 360) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 361) 
/* Validate @action for a driver that implements exactly the HW stats
 * type named by @allow_bit.  Sets an extack message and returns false
 * on rejection.
 */
static inline bool
flow_action_hw_stats_check(const struct flow_action *action,
			   struct netlink_ext_ack *extack,
			   enum flow_action_hw_stats_bit allow_bit)
{
	return __flow_action_hw_stats_check(action, extack, true, allow_bit);
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 369) 
/* Validate @action for a driver that supports only the default "any"
 * HW stats type.  Sets an extack message and returns false on rejection.
 */
static inline bool
flow_action_basic_hw_stats_check(const struct flow_action *action,
				 struct netlink_ext_ack *extack)
{
	return __flow_action_hw_stats_check(action, extack, false, 0);
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 376) 
/* A flow rule: what to match (@match) and what to do on a match
 * (@action).
 */
struct flow_rule {
	struct flow_match	match;
	struct flow_action	action;
};

/* Allocate a flow_rule with room for @num_actions action entries. */
struct flow_rule *flow_rule_alloc(unsigned int num_actions);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 383) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 384) static inline bool flow_rule_match_key(const struct flow_rule *rule,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 385) 				       enum flow_dissector_key_id key)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 386) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 387) 	return dissector_uses_key(rule->match.dissector, key);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 388) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 389) 
/* Accumulated statistics reported back for a flow.  @used_hw_stats
 * records which HW stats type(s) produced the numbers and is only
 * meaningful when @used_hw_stats_valid is set.
 */
struct flow_stats {
	u64	pkts;
	u64	bytes;
	u64	drops;
	u64	lastused;
	enum flow_action_hw_stats used_hw_stats;
	bool used_hw_stats_valid;
};
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 398) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 399) static inline void flow_stats_update(struct flow_stats *flow_stats,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 400) 				     u64 bytes, u64 pkts,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 401) 				     u64 drops, u64 lastused,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 402) 				     enum flow_action_hw_stats used_hw_stats)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 403) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 404) 	flow_stats->pkts	+= pkts;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 405) 	flow_stats->bytes	+= bytes;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 406) 	flow_stats->drops	+= drops;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 407) 	flow_stats->lastused	= max_t(u64, flow_stats->lastused, lastused);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 408) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 409) 	/* The driver should pass value with a maximum of one bit set.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 410) 	 * Passing FLOW_ACTION_HW_STATS_ANY is invalid.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 411) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 412) 	WARN_ON(used_hw_stats == FLOW_ACTION_HW_STATS_ANY);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 413) 	flow_stats->used_hw_stats |= used_hw_stats;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 414) 	flow_stats->used_hw_stats_valid = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 415) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 416) 
/* Bind or unbind a flow block to/from a driver. */
enum flow_block_command {
	FLOW_BLOCK_BIND,
	FLOW_BLOCK_UNBIND,
};
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 421) 
/* Where a flow block is attached: clsact ingress/egress, or the RED
 * qdisc's early-drop/mark hooks.
 */
enum flow_block_binder_type {
	FLOW_BLOCK_BINDER_TYPE_UNSPEC,
	FLOW_BLOCK_BINDER_TYPE_CLSACT_INGRESS,
	FLOW_BLOCK_BINDER_TYPE_CLSACT_EGRESS,
	FLOW_BLOCK_BINDER_TYPE_RED_EARLY_DROP,
	FLOW_BLOCK_BINDER_TYPE_RED_MARK,
};
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 429) 
/* Anchor for the list of flow_block_cb callbacks bound to a block. */
struct flow_block {
	struct list_head cb_list;
};
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 433) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 434) struct netlink_ext_ack;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 435) 
/* Parameters of a block bind/unbind request passed down to drivers.
 * @command and @binder_type describe the operation; @block is the flow
 * block being (un)bound and @extack reports errors back over netlink.
 * NOTE(review): the exact roles of @cb_list vs @driver_block_list vs
 * @cb_list_head are defined by net/core/flow_offload.c — confirm there.
 */
struct flow_block_offload {
	enum flow_block_command command;
	enum flow_block_binder_type binder_type;
	bool block_shared;
	bool unlocked_driver_cb;
	struct net *net;
	struct flow_block *block;
	struct list_head cb_list;
	struct list_head *driver_block_list;
	struct netlink_ext_ack *extack;
	struct Qdisc *sch;
	struct list_head *cb_list_head;
};
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 449) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 450) enum tc_setup_type;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 451) typedef int flow_setup_cb_t(enum tc_setup_type type, void *type_data,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 452) 			    void *cb_priv);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 453) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 454) struct flow_block_cb;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 455) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 456) struct flow_block_indr {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 457) 	struct list_head		list;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 458) 	struct net_device		*dev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 459) 	struct Qdisc			*sch;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 460) 	enum flow_block_binder_type	binder_type;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 461) 	void				*data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 462) 	void				*cb_priv;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 463) 	void				(*cleanup)(struct flow_block_cb *block_cb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 464) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 465) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 466) struct flow_block_cb {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 467) 	struct list_head	driver_list;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 468) 	struct list_head	list;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 469) 	flow_setup_cb_t		*cb;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 470) 	void			*cb_ident;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 471) 	void			*cb_priv;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 472) 	void			(*release)(void *cb_priv);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 473) 	struct flow_block_indr	indr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 474) 	unsigned int		refcnt;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 475) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 476) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 477) struct flow_block_cb *flow_block_cb_alloc(flow_setup_cb_t *cb,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 478) 					  void *cb_ident, void *cb_priv,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 479) 					  void (*release)(void *cb_priv));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 480) struct flow_block_cb *flow_indr_block_cb_alloc(flow_setup_cb_t *cb,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 481) 					       void *cb_ident, void *cb_priv,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 482) 					       void (*release)(void *cb_priv),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 483) 					       struct flow_block_offload *bo,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 484) 					       struct net_device *dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 485) 					       struct Qdisc *sch, void *data,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 486) 					       void *indr_cb_priv,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 487) 					       void (*cleanup)(struct flow_block_cb *block_cb));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 488) void flow_block_cb_free(struct flow_block_cb *block_cb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 489) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 490) struct flow_block_cb *flow_block_cb_lookup(struct flow_block *block,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 491) 					   flow_setup_cb_t *cb, void *cb_ident);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 492) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 493) void *flow_block_cb_priv(struct flow_block_cb *block_cb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 494) void flow_block_cb_incref(struct flow_block_cb *block_cb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 495) unsigned int flow_block_cb_decref(struct flow_block_cb *block_cb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 496) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 497) static inline void flow_block_cb_add(struct flow_block_cb *block_cb,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 498) 				     struct flow_block_offload *offload)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 499) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 500) 	list_add_tail(&block_cb->list, &offload->cb_list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 501) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 502) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 503) static inline void flow_block_cb_remove(struct flow_block_cb *block_cb,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 504) 					struct flow_block_offload *offload)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 505) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 506) 	list_move(&block_cb->list, &offload->cb_list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 507) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 508) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 509) static inline void flow_indr_block_cb_remove(struct flow_block_cb *block_cb,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 510) 					     struct flow_block_offload *offload)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 511) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 512) 	list_del(&block_cb->indr.list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 513) 	list_move(&block_cb->list, &offload->cb_list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 514) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 515) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 516) bool flow_block_cb_is_busy(flow_setup_cb_t *cb, void *cb_ident,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 517) 			   struct list_head *driver_block_list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 518) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 519) int flow_block_cb_setup_simple(struct flow_block_offload *f,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 520) 			       struct list_head *driver_list,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 521) 			       flow_setup_cb_t *cb,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 522) 			       void *cb_ident, void *cb_priv, bool ingress_only);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 523) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 524) enum flow_cls_command {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 525) 	FLOW_CLS_REPLACE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 526) 	FLOW_CLS_DESTROY,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 527) 	FLOW_CLS_STATS,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 528) 	FLOW_CLS_TMPLT_CREATE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 529) 	FLOW_CLS_TMPLT_DESTROY,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 530) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 531) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 532) struct flow_cls_common_offload {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 533) 	u32 chain_index;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 534) 	__be16 protocol;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 535) 	u32 prio;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 536) 	struct netlink_ext_ack *extack;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 537) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 538) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 539) struct flow_cls_offload {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 540) 	struct flow_cls_common_offload common;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 541) 	enum flow_cls_command command;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 542) 	unsigned long cookie;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 543) 	struct flow_rule *rule;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 544) 	struct flow_stats stats;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 545) 	u32 classid;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 546) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 547) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 548) static inline struct flow_rule *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 549) flow_cls_offload_flow_rule(struct flow_cls_offload *flow_cmd)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 550) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 551) 	return flow_cmd->rule;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 552) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 553) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 554) static inline void flow_block_init(struct flow_block *flow_block)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 555) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 556) 	INIT_LIST_HEAD(&flow_block->cb_list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 557) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 558) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 559) typedef int flow_indr_block_bind_cb_t(struct net_device *dev, struct Qdisc *sch, void *cb_priv,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 560) 				      enum tc_setup_type type, void *type_data,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 561) 				      void *data,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 562) 				      void (*cleanup)(struct flow_block_cb *block_cb));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 563) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 564) int flow_indr_dev_register(flow_indr_block_bind_cb_t *cb, void *cb_priv);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 565) void flow_indr_dev_unregister(flow_indr_block_bind_cb_t *cb, void *cb_priv,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 566) 			      void (*release)(void *cb_priv));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 567) int flow_indr_dev_setup_offload(struct net_device *dev, struct Qdisc *sch,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 568) 				enum tc_setup_type type, void *data,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 569) 				struct flow_block_offload *bo,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 570) 				void (*cleanup)(struct flow_block_cb *block_cb));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 571) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 572) #endif /* _NET_FLOW_OFFLOAD_H */